Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
-Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
| ARM            | MMU-500         | #841119,826419  | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
++----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
| Cavium         | ThunderX ITS    | #22375,24313    | CAVIUM_ERRATUM_22375        |
+----------------+-----------------+-----------------+-----------------------------+
| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+----------------+-----------------+-----------------+-----------------------------+
-| Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+----------------+-----------------+-----------------+-----------------------------+
prog_flow_dissector
+Testing BPF
+===========
+
+.. toctree::
+ :maxdepth: 1
+
+ s390
+
+
.. Links:
.. _Documentation/networking/filter.txt: ../networking/filter.txt
.. _man-pages: https://www.kernel.org/doc/man-pages/
--- /dev/null
+===================
+Testing BPF on s390
+===================
+
+1. Introduction
+***************
+
+IBM Z is a family of mainframe computers descended from the IBM System/360,
+introduced in 1964. They are supported by the Linux kernel under the name
+"s390". This document describes how to test BPF in an s390 QEMU guest.
+
+2. One-time setup
+*****************
+
+The following is required to build and run the test suite:
+
+ * s390 GCC
+ * s390 development headers and libraries
+ * Clang with BPF support
+ * QEMU with s390 support
+ * Disk image with s390 rootfs
+
+Debian supports installing the compiler and libraries for s390 out of the
+box. Users of other distros may use debootstrap in order to set up a Debian
+chroot::
+
+ sudo debootstrap \
+ --variant=minbase \
+ --include=sudo \
+ testing \
+ ./s390-toolchain
+ sudo mount --rbind /dev ./s390-toolchain/dev
+ sudo mount --rbind /proc ./s390-toolchain/proc
+ sudo mount --rbind /sys ./s390-toolchain/sys
+ sudo chroot ./s390-toolchain
+
+Once inside the Debian system or chroot, the build prerequisites can be
+installed as follows::
+
+ sudo dpkg --add-architecture s390x
+ sudo apt-get update
+ sudo apt-get install \
+ bc \
+ bison \
+ cmake \
+ debootstrap \
+ dwarves \
+ flex \
+ g++ \
+ gcc \
+ g++-s390x-linux-gnu \
+ gcc-s390x-linux-gnu \
+ gdb-multiarch \
+ git \
+ make \
+ python3 \
+ qemu-system-misc \
+ qemu-utils \
+ rsync \
+ libcap-dev:s390x \
+ libelf-dev:s390x \
+ libncurses-dev
+
+The latest Clang targeting BPF can be built and installed as follows::
+
+ git clone https://github.com/llvm/llvm-project.git
+ ln -s ../../clang llvm-project/llvm/tools/
+ mkdir llvm-project-build
+ cd llvm-project-build
+ cmake \
+ -DLLVM_TARGETS_TO_BUILD=BPF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_INSTALL_PREFIX=/opt/clang-bpf \
+ ../llvm-project/llvm
+ make
+ sudo make install
+ export PATH=/opt/clang-bpf/bin:$PATH
+
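+To verify that the resulting toolchain can emit BPF objects, a quick smoke
+test (the file names here are just an example) can be performed with::
+
+  echo 'int func(void) { return 0; }' > test.c
+  clang -target bpf -O2 -c test.c -o test.o
+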
+The disk image can be prepared using a loopback mount and debootstrap::
+
+ qemu-img create -f raw ./s390.img 1G
+ sudo losetup -f ./s390.img
+ sudo mkfs.ext4 /dev/loopX
+ mkdir ./s390.rootfs
+ sudo mount /dev/loopX ./s390.rootfs
+ sudo debootstrap \
+ --foreign \
+ --arch=s390x \
+ --variant=minbase \
+ --include=" \
+ iproute2, \
+ iputils-ping, \
+ isc-dhcp-client, \
+ kmod, \
+ libcap2, \
+ libelf1, \
+ netcat, \
+ procps" \
+ testing \
+ ./s390.rootfs
+ sudo umount ./s390.rootfs
+ sudo losetup -d /dev/loopX
+
+3. Compilation
+**************
+
+In addition to the usual Kconfig options required to run the BPF test suite, it
+is also helpful to select::
+
+ CONFIG_NET_9P=y
+ CONFIG_9P_FS=y
+ CONFIG_NET_9P_VIRTIO=y
+ CONFIG_VIRTIO_PCI=y
+
+as these options enable an easy way to share files with the s390 virtual
+machine.
+
+Compiling the kernel, modules and test suite, as well as preparing gdb
+scripts to simplify debugging, can be done using the following commands::
+
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- menuconfig
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- bzImage modules scripts_gdb
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- \
+ -C tools/testing/selftests \
+ TARGETS=bpf \
+ INSTALL_PATH=$PWD/tools/testing/selftests/kselftest_install \
+ install
+
+4. Running the test suite
+*************************
+
+The virtual machine can be started as follows::
+
+ qemu-system-s390x \
+ -cpu max,zpci=on \
+ -smp 2 \
+ -m 4G \
+ -kernel linux/arch/s390/boot/compressed/vmlinux \
+ -drive file=./s390.img,if=virtio,format=raw \
+ -nographic \
+ -append 'root=/dev/vda rw console=ttyS1' \
+ -virtfs local,path=./linux,security_model=none,mount_tag=linux \
+ -object rng-random,filename=/dev/urandom,id=rng0 \
+ -device virtio-rng-ccw,rng=rng0 \
+ -netdev user,id=net0 \
+ -device virtio-net-ccw,netdev=net0
+
+When running on a real IBM Z, ``-enable-kvm`` may be added for better
+performance. When starting the virtual machine for the first time, the disk
+image setup must be finalized using the following command::
+
+ /debootstrap/debootstrap --second-stage
+
+The directory with the code built on the host, as well as ``/proc`` and
+``/sys``, needs to be mounted as follows::
+
+ mkdir -p /linux
+ mount -t 9p linux /linux
+ mount -t proc proc /proc
+ mount -t sysfs sys /sys
+
+After that, the test suite can be run using the following commands::
+
+ cd /linux/tools/testing/selftests/kselftest_install
+ ./run_kselftest.sh
+
+As usual, tests can also be run individually::
+
+ cd /linux/tools/testing/selftests/bpf
+ ./test_verifier
+
+5. Debugging
+************
+
+It is possible to debug the s390 kernel using the QEMU GDB stub, which is
+activated by passing ``-s`` to QEMU.
+
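+For example (reusing the invocation from section 4, where ``<...>`` stands
+for the options shown there)::
+
+  qemu-system-s390x <...> -s -S
+
+Here ``-s`` is a shorthand for ``-gdb tcp::1234``, and the optional ``-S``
+freezes the CPU at startup until a debugger attaches.
+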
+It is preferable to turn KASLR off, so that gdb knows where to find the
+kernel image in memory, by building the kernel with::
+
+  CONFIG_RANDOMIZE_BASE=n
+
+GDB can then be attached using the following command::
+
+ gdb-multiarch -ex 'target remote localhost:1234' ./vmlinux
+
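+Since ``scripts_gdb`` was built during compilation, the kernel's python
+helpers (``lx-dmesg``, ``lx-symbols`` and friends) can be used as well; a
+sketch, assuming gdb is started from the kernel build directory::
+
+  gdb-multiarch \
+      -iex 'add-auto-load-safe-path .' \
+      -ex 'target remote localhost:1234' \
+      ./vmlinux
+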
+6. Network
+**********
+
+If the network is needed in the virtual machine, e.g. in order to install
+additional packages, it can be configured using::
+
+ dhclient eth0
+
+7. Links
+********
+
+This document is a compilation of techniques; more comprehensive
+descriptions can be found by following these links:
+
+- `Debootstrap <https://wiki.debian.org/EmDebian/CrossDebootstrap>`_
+- `Multiarch <https://wiki.debian.org/Multiarch/HOWTO>`_
+- `Building LLVM <https://llvm.org/docs/CMake.html>`_
+- `Cross-compiling the kernel <https://wiki.gentoo.org/wiki/Embedded_Handbook/General/Cross-compiling_the_kernel>`_
+- `QEMU s390x Guest Support <https://wiki.qemu.org/Documentation/Platforms/S390X>`_
+- `Plan 9 folder sharing over Virtio <https://wiki.qemu.org/Documentation/9psetup>`_
+- `Using GDB with QEMU <https://wiki.osdev.org/Kernel_Debugging#Use_GDB_with_QEMU>`_
- description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
items:
- - const: tsd,rk3368-uq7-haikou
+ - const: tsd,rk3368-lion-haikou
- const: rockchip,rk3368
- description: Theobroma Systems RK3399-Q7 with Haikou baseboard
items:
- - const: tsd,rk3399-q7-haikou
+ - const: tsd,rk3399-puma-haikou
- const: rockchip,rk3399
- description: Tronsmart Orion R68 Meta
* Advanced Interrupt Controller (AIC)
Required properties:
-- compatible: Should be "atmel,<chip>-aic"
- <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+ - "atmel,<chip>-aic" where <chip> can be "at91rm9200", "sama5d2",
+ "sama5d3" or "sama5d4"
+ - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/allwinner,sun4i-a10-csi.yaml#
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-csi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
clocks:
items:
- description: The CSI interface clock
- - description: The CSI module clock
- description: The CSI ISP clock
- description: The CSI DRAM clock
clock-names:
items:
- const: bus
- - const: mod
- const: isp
- const: ram
compatible = "allwinner,sun7i-a20-csi0";
reg = <0x01c09000 0x1000>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
- <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
- clock-names = "bus", "mod", "isp", "ram";
+ clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+ clock-names = "bus", "isp", "ram";
resets = <&ccu RST_CSI0>;
port {
allOf:
- $ref: "/schemas/types.yaml#/definitions/string"
- enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
- ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1,
- GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2,
- GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12,
- I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7,
- I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC,
- LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
+ ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
+ GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+ GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
+ I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
+ I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+ LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
- SALT6, SALT7, SALT8, SALT9, SD1, SD2, SD3, SD3DAT4, SD3DAT5,
- SD3DAT6, SD3DAT7, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO,
- SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1,
- SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
- TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5,
- TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1,
- TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13, UART6, UART7,
- UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
- WDTRST4, ]
+ SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
+ SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+ SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+ TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+ TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+ THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
+ UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+ WDTRST3, WDTRST4, ]
groups:
allOf:
- $ref: "/schemas/types.yaml#/definitions/string"
- enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
- ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, GPIT0,
- GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
- GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1,
- I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3,
- I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6,
- JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
- MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3,
- MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
- NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1,
- NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE,
- PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0, PWM12G1,
- PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2, PWM3,
- PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
- QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
- RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1,
- SALT11G0, SALT11G1, SALT12G0, SALT12G1, SALT13G0, SALT13G1,
- SALT14G0, SALT14G1, SALT15G0, SALT15G1, SALT16G0, SALT16G1,
- SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9G0,
- SALT9G1, SD1, SD2, SD3, SD3DAT4, SD3DAT5, SD3DAT6, SD3DAT7,
- SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD,
- SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
- SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13,
- TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8,
- TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4,
- UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
- UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
- WDTRST4, ]
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
+ EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
+ FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
+ GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
+ GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
+ I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
+ I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
+ LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
+ MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
+ NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+ NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+ OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
+ PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
+ PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
+ PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
+ RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
+ SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
+ SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
+ SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
+ SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+ SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+ SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+ TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+ TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+ THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
+ UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
+ VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
required:
- compatible
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ptp/ptp-idtcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IDT ClockMatrix (TM) PTP Clock Device Tree Bindings
+
+maintainers:
+ - Vincent Cheng <vincent.cheng.xh@renesas.com>
+
+properties:
+ compatible:
+ enum:
+ # For System Synchronizer
+ - idt,8a34000
+ - idt,8a34001
+ - idt,8a34002
+ - idt,8a34003
+ - idt,8a34004
+ - idt,8a34005
+ - idt,8a34006
+ - idt,8a34007
+ - idt,8a34008
+ - idt,8a34009
+ # For Port Synchronizer
+ - idt,8a34010
+ - idt,8a34011
+ - idt,8a34012
+ - idt,8a34013
+ - idt,8a34014
+ - idt,8a34015
+ - idt,8a34016
+ - idt,8a34017
+ - idt,8a34018
+ - idt,8a34019
+ # For Universal Frequency Translator (UFT)
+ - idt,8a34040
+ - idt,8a34041
+ - idt,8a34042
+ - idt,8a34043
+ - idt,8a34044
+ - idt,8a34045
+ - idt,8a34046
+ - idt,8a34047
+ - idt,8a34048
+ - idt,8a34049
+
+ reg:
+ maxItems: 1
+ description:
+ I2C slave address of the device.
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c@1 {
+ compatible = "abc,acme-1234";
+ reg = <0x01 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phc@5b {
+ compatible = "idt,8a34000";
+ reg = <0x5b>;
+ };
+ };
properties:
compatible:
enum:
- - const: regulator-fixed
- - const: regulator-fixed-clock
+ - regulator-fixed
+ - regulator-fixed-clock
regulator-name: true
properties:
compatible:
- items:
- - enum:
- - sifive,rocket0
- - sifive,e5
- - sifive,e51
- - sifive,u54-mc
- - sifive,u54
- - sifive,u5
- - const: riscv
+ oneOf:
+ - items:
+ - enum:
+ - sifive,rocket0
+ - sifive,e5
+ - sifive,e51
+ - sifive,u54-mc
+ - sifive,u54
+ - sifive,u5
+ - const: riscv
+ - const: riscv # Simulator only
description:
Identifies that the hart uses the RISC-V instruction set
and identifies the type of the hart.
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.
- timebase-frequency:
- type: integer
- minimum: 1
- description:
- Specifies the clock frequency of the system timer in Hz.
- This value is common to all harts on a single system image.
+ # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+ timebase-frequency: false
interrupt-controller:
type: object
required:
- riscv,isa
- - timebase-frequency
- interrupt-controller
examples:
.. SPDX-License-Identifier: GPL-2.0+
-==============================================================
-Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
-==============================================================
+=============================================================
+Linux Base Driver for the Intel(R) PRO/100 Family of Adapters
+=============================================================
June 1, 2018
In This Release
===============
-This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
+This file describes the Linux Base Driver for the Intel(R) PRO/100 Family of
Adapters. This driver includes support for Itanium(R)2-based systems.
For questions related to hardware requirements, refer to the documentation
The latest release of ethtool can be found from
https://www.kernel.org/pub/software/network/ethtool/
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is provided through the ethtool* utility. For instructions on
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is provided through the ethtool utility. For instructions on
enabling WoL with ethtool, refer to the ethtool man page. WoL will be
enabled on the system during the next shut down or reboot. For this
driver version, in order to enable WoL, the e100 driver must be loaded
.. SPDX-License-Identifier: GPL-2.0+
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
Intel Gigabit Linux driver.
Copyright(c) 1999 - 2013 Intel Corporation.
The latest release of ethtool can be found from
https://www.kernel.org/pub/software/network/ethtool/
-Enabling Wake on LAN* (WoL)
----------------------------
+Enabling Wake on LAN (WoL)
+--------------------------
- WoL is configured through the ethtool* utility.
+ WoL is configured through the ethtool utility.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the e1000 driver must be
.. SPDX-License-Identifier: GPL-2.0+
-======================================================
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
+=====================================================
+Linux Driver for Intel(R) Ethernet Network Connection
+=====================================================
Intel Gigabit Linux driver.
Copyright(c) 2008-2018 Intel Corporation.
manually set devices for 1 Gbps and higher.
Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
Caution: Only experienced network administrators should force speed and duplex
or change autonegotiation advertising manually. The settings at the switch must
operate only in full duplex and only at their native speed.
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
WoL will be enabled on the system during the next shut down or reboot. For
this driver version, in order to enable WoL, the e1000e driver must be loaded
.. SPDX-License-Identifier: GPL-2.0+
-==============================================================
-Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
-==============================================================
+=============================================================
+Linux Base Driver for Intel(R) Ethernet Multi-host Controller
+=============================================================
August 20, 2018
Copyright(c) 2015-2018 Intel Corporation.
Known Issues/Troubleshooting
============================
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
----------------------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS under Linux KVM
+-------------------------------------------------------------------------------------
KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
includes traditional PCIe devices, as well as SR-IOV-capable devices based on
the Intel Ethernet Controller XL710.
.. SPDX-License-Identifier: GPL-2.0+
-==================================================================
-Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 700 Series
+=================================================================
Intel 40 Gigabit Linux driver.
Copyright(c) 1999-2018 Intel Corporation.
Network Adapter XXV710 based devices.
Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
Caution: Only experienced network administrators should force speed and duplex
or change autonegotiation advertising manually. The settings at the switch must
.. SPDX-License-Identifier: GPL-2.0+
-==================================================================
-Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
-==================================================================
+=================================================================
+Linux Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+=================================================================
Intel Ethernet Adaptive Virtual Function Linux driver.
Copyright(c) 2013-2018 Intel Corporation.
Overview
========
-This file describes the iavf Linux* Base Driver. This driver was formerly
+This file describes the iavf Linux Base Driver. This driver was formerly
called i40evf.
The iavf driver supports the below mentioned virtual function devices and
.. SPDX-License-Identifier: GPL-2.0+
-===================================================================
-Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
-===================================================================
+==================================================================
+Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
+==================================================================
Intel ice Linux driver.
Copyright(c) 2018 Intel Corporation.
.. SPDX-License-Identifier: GPL-2.0+
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
Intel Gigabit Linux driver.
Copyright(c) 1999-2018 Intel Corporation.
https://www.kernel.org/pub/software/network/ethtool/
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
WoL will be enabled on the system during the next shut down or reboot. For
this driver version, in order to enable WoL, the igb driver must be loaded
.. SPDX-License-Identifier: GPL-2.0+
-============================================================
-Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
-============================================================
+===========================================================
+Linux Base Virtual Function Driver for Intel(R) 1G Ethernet
+===========================================================
Intel Gigabit Virtual Function Linux driver.
Copyright(c) 1999-2018 Intel Corporation.
.. SPDX-License-Identifier: GPL-2.0+
-=============================================================================
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
-=============================================================================
+===========================================================================
+Linux Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+===========================================================================
Intel 10 Gigabit Linux driver.
Copyright(c) 1999-2018 Intel Corporation.
Known Issues/Troubleshooting
============================
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS
+---------------------------------------------------------------------
Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
This includes traditional PCIe devices, as well as SR-IOV-capable devices based
on the Intel Ethernet Controller XL710.
.. SPDX-License-Identifier: GPL-2.0+
-=============================================================
-Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
-=============================================================
+============================================================
+Linux Base Virtual Function Driver for Intel(R) 10G Ethernet
+============================================================
Intel 10 Gigabit Virtual Function Linux driver.
Copyright(c) 1999-2018 Intel Corporation.
.. SPDX-License-Identifier: GPL-2.0+
-==========================================================
-Linux* Driver for the Pensando(R) Ethernet adapter family
-==========================================================
+========================================================
+Linux Driver for the Pensando(R) Ethernet adapter family
+========================================================
Pensando Linux Ethernet driver.
Copyright(c) 2019 Pensando Systems, Inc
somaxconn - INTEGER
Limit of socket listen() backlog, known in userspace as SOMAXCONN.
- Defaults to 128. See also tcp_max_syn_backlog for additional tuning
- for TCP sockets.
+ Defaults to 4096. (Was 128 before linux-5.4)
+ See also tcp_max_syn_backlog for additional tuning for TCP sockets.
tcp_abort_on_overflow - BOOLEAN
If the listening service is too slow to accept new connections,
up to ~64K of unswappable memory.
tcp_max_syn_backlog - INTEGER
- Maximal number of remembered connection requests, which have not
- received an acknowledgment from connecting client.
+ Maximal number of remembered connection requests (SYN_RECV),
+ which have not received an acknowledgment from the connecting client.
+ This is a per-listener limit.
The minimal value is 128 for low memory machines, and it will
increase in proportion to the memory of machine.
If server suffers from overload, try increasing this number.
+ Remember to also check /proc/sys/net/core/somaxconn.
+ A SYN_RECV request socket consumes about 304 bytes of memory.
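+
+ As an illustrative sketch (the values are examples, not tuning
+ recommendations), both limits can be inspected and raised together with:
+
+   sysctl net.core.somaxconn net.ipv4.tcp_max_syn_backlog
+   sysctl -w net.core.somaxconn=8192
+   sysctl -w net.ipv4.tcp_max_syn_backlog=8192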
tcp_max_tw_buckets - INTEGER
Maximal number of timewait sockets held by system simultaneously.
ARM/SPREADTRUM SoC SUPPORT
M: Orson Zhai <orsonzhai@gmail.com>
-M: Baolin Wang <baolin.wang@linaro.org>
+M: Baolin Wang <baolin.wang7@gmail.com>
M: Chunyan Zhang <zhang.lyra@gmail.com>
S: Maintained
F: arch/arm64/boot/dts/sprd
N: sprd
+N: sc27xx
+N: sc2731
ARM/STI ARCHITECTURE
M: Patrice Chotard <patrice.chotard@st.com>
F: arch/arm64/net/
BPF JIT for MIPS (32-BIT AND 64-BIT)
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <paulburton@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
N: kona
F: arch/arm/mach-bcm/
-BROADCOM BCM2835 ARM ARCHITECTURE
+BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Eric Anholt <eric@anholt.net>
M: Stefan Wahren <wahrenst@gmx.net>
L: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/anholt/linux
S: Maintained
+N: bcm2711
N: bcm2835
F: drivers/staging/vc04_services
F: drivers/usb/gadget/udc/bcm63xx_udc.*
BROADCOM BCM7XXX ARM ARCHITECTURE
-M: Brian Norris <computersforpeace@gmail.com>
-M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F: drivers/usb/atm/ueagle-atm.c
IMGTEC ASCII LCD DRIVER
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <paulburton@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
F: drivers/auxdisplay/img-ascii-lcd.c
MIPS
M: Ralf Baechle <ralf@linux-mips.org>
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <paulburton@kernel.org>
M: James Hogan <jhogan@kernel.org>
L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/
F: drivers/platform/mips/
MIPS BOSTON DEVELOPMENT BOARD
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <paulburton@kernel.org>
L: linux-mips@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
F: include/dt-bindings/clock/boston-clock.h
MIPS GENERIC PLATFORM
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <paulburton@kernel.org>
L: linux-mips@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
NETWORKING [TLS]
M: Boris Pismenny <borisp@mellanox.com>
M: Aviad Yehezkel <aviadye@mellanox.com>
-M: Dave Watson <davejwatson@fb.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Kicinski <jakub.kicinski@netronome.com>
RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
-M: Palmer Dabbelt <palmer@sifive.com>
+M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
L: linux-riscv@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
F: drivers/media/mmc/siano/
SIFIVE DRIVERS
-M: Palmer Dabbelt <palmer@sifive.com>
+M: Palmer Dabbelt <palmer@dabbelt.com>
M: Paul Walmsley <paul.walmsley@sifive.com>
L: linux-riscv@lists.infradead.org
T: git git://github.com/sifive/riscv-linux.git
SIFIVE FU540 SYSTEM-ON-CHIP
M: Paul Walmsley <paul.walmsley@sifive.com>
-M: Palmer Dabbelt <palmer@sifive.com>
+M: Palmer Dabbelt <palmer@dabbelt.com>
L: linux-riscv@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
S: Supported
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Nesting Opossum
+EXTRAVERSION = -rc5
+NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
clock-frequency = <33333333>;
};
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "5v0-supply";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
cpu_intc: cpu-interrupt-controller {
compatible = "snps,archs-intc";
interrupt-controller;
clocks = <&input_clk>;
cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
<&creg_gpio 1 GPIO_ACTIVE_LOW>;
+
+ spi-flash@0 {
+ compatible = "sst26wf016b", "jedec,spi-nor";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <4000000>;
+ };
+
+ adc@1 {
+ compatible = "ti,adc108s102";
+ reg = <1>;
+ vref-supply = <&reg_5v0>;
+ spi-max-frequency = <1000000>;
+ };
};
creg_gpio: gpio@14b0 {
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_SNPS_CREG=y
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_UDL=y
CONFIG_MMC_DW=y
CONFIG_DMADEVICES=y
CONFIG_DW_AXI_DMAC=y
+CONFIG_IIO=y
+CONFIG_TI_ADC108S102=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
/* loop thru all available h/w condition indexes */
for (i = 0; i < cc_bcr.c; i++) {
write_aux_reg(ARC_REG_CC_INDEX, i);
- cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
- cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
arc_pmu_map_hw_event(i, cc_name.str);
arc_pmu_add_raw_event_attr(i, cc_name.str);
reg = <0x70>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c@0 {
/* FMC A */
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
- i2c-mux-idle-disconnect;
};
i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
- i2c-mux-idle-disconnect;
};
i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- i2c-mux-idle-disconnect;
};
i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
- i2c-mux-idle-disconnect;
};
i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- i2c-mux-idle-disconnect;
};
i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
- i2c-mux-idle-disconnect;
ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
- i2c-mux-idle-disconnect;
};
i2c@7 {
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
- i2c-mux-idle-disconnect;
u41: pca9575@20 {
compatible = "nxp,pca9575";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
+ bus-width = <4>;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;
status = "okay";
reg = <0 0x40000000>;
};
+ leds {
+ /*
+ * Since there is no upstream GPIO driver yet,
+ * remove the incomplete node.
+ */
+ /delete-node/ act;
+ };
+
reg_3v3: fixed-regulator {
compatible = "regulator-fixed";
regulator-name = "3V3";
vin-supply = <&sw1c_reg>;
};
+&snvs_poweroff {
+ status = "okay";
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
<&clks IMX7D_GPT1_ROOT_CLK>;
clock-names = "ipg", "per";
};
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
<&clks IMX7D_GPT2_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
<&clks IMX7D_GPT3_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
<&clks IMX7D_GPT4_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
&twl_gpio {
ti,use-leds;
};
+
+&twl_keypad {
+ status = "disabled";
+};
compatible = "ti,wl1285", "ti,wl1283";
reg = <2>;
/* gpio_100 with gpmc_wait2 pad as wakeirq */
- interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
<&omap4_pmx_core 0x4e>;
interrupt-names = "irq", "wakeup";
ref-clock-frequency = <26000000>;
compatible = "ti,wl1271";
reg = <2>;
/* gpio_53 with gpmc_ncs3 pad as wakeup */
- interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_HIGH>,
<&omap4_pmx_core 0x3a>;
interrupt-names = "irq", "wakeup";
ref-clock-frequency = <38400000>;
compatible = "ti,wl1281";
reg = <2>;
interrupt-parent = <&gpio1>;
- interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
ref-clock-frequency = <26000000>;
tcxo-clock-frequency = <26000000>;
};
compatible = "ti,wl1271";
reg = <2>;
interrupt-parent = <&gpio2>;
- interrupts = <9 IRQ_TYPE_EDGE_RISING>; /* gpio 41 */
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
ref-clock-frequency = <38400000>;
};
};
pinctrl-names = "default";
pinctrl-0 = <&wlcore_irq_pin>;
interrupt-parent = <&gpio1>;
- interrupts = <14 IRQ_TYPE_EDGE_RISING>; /* gpio 14 */
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>; /* gpio 14 */
ref-clock-frequency = <26000000>;
};
};
};
};
- gpu_cm: clock-controller@1500 {
+ gpu_cm: gpu_cm@1500 {
compatible = "ti,omap4-cm";
reg = <0x1500 0x100>;
#address-cells = <1>;
<STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
bias-disable;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
pins2 {
pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
bias-pull-up;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
};
<STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
bias-disable;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
pins2 {
pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
bias-pull-up;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
};
compatible = "allwinner,sun7i-a20-csi0";
reg = <0x01c09000 0x1000>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
- <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
- clock-names = "bus", "mod", "isp", "ram";
+ clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+ clock-names = "bus", "isp", "ram";
resets = <&ccu RST_CSI0>;
status = "disabled";
};
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+ i2c-mux-idle-disconnect;
sff0_i2c: i2c@1 {
#address-cells = <1>;
reg = <0x71>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
sff5_i2c: i2c@1 {
#address-cells = <1>;
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_DA8XX=y
CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_GPIO=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_VIDEO_OV5645=m
CONFIG_IMX_IPUV3_CORE=y
CONFIG_DRM=y
+CONFIG_DRM_MSM=y
CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
CONFIG_DRM_OMAP_PANEL_DPI=m
CONFIG_DRM_OMAP_PANEL_DSI_CM=m
-CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
CONFIG_DRM_TILCDC=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_PANEL_LG_LB035Q02=m
+CONFIG_DRM_PANEL_NEC_NL8048HL11=m
+CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
+CONFIG_DRM_PANEL_SONY_ACX565AKM=m
+CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
+CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
unsigned int domain;
return domain;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
isb();
}
#else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
return 0;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
}
#endif
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
#endif
}
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
- * r0 = cp#15 control register
+ * r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r3
#else
- .long 0 @ r3
+M_CLASS(.long exc_ret) @ r3
+AR_CLASS(.long 0) @ r3
#endif
.size __mmap_switched_data, . - __mmap_switched_data
bic r0, r0, #V7M_SCB_CCR_IC
#endif
str r0, [r12, V7M_SCB_CCR]
+ /* Pass exc_ret to __mmap_switched */
+ mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
};
static const struct dma_slave_map dm365_edma_map[] = {
- { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
- { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+ { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+ { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
{ "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
.reset_name = "mmu",
.assert_reset = omap_device_assert_hardreset,
.deassert_reset = omap_device_deassert_hardreset,
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
+};
+
+static struct iommu_platform_data omap3_iommu_isp_pdata = {
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
};
static int omap3_sbc_t3730_twl_callback(struct device *dev,
.reset_name = "mmu_cache",
.assert_reset = omap_device_assert_hardreset,
.deassert_reset = omap_device_deassert_hardreset,
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
};
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
&omap3_iommu_pdata),
+ OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu",
+ &omap3_iommu_isp_pdata),
OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000,
"480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]),
OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000,
__put32_unaligned_check("strbt", val, addr)
static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
{
if (!LDST_U_BIT(instr))
offset.un = -offset.un;
}
static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
}
static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
- struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
unsigned int rd2;
}
static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
* PU = 10 A B
*/
static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd, rn, correction, nr_regs, regbits;
unsigned long eaddr, newaddr;
* processor for us.
*/
if (addr != eaddr) {
- pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+ pr_err("LDMSTM: PC = %08lx, instr = %08x, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
* 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
*/
static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
union offset_union *poffset)
{
- unsigned long instr = *pinstr;
+ u32 instr = *pinstr;
u16 tinst1 = (instr >> 16) & 0xffff;
u16 tinst2 = instr & 0xffff;
return NULL;
}
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+ u32 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_arm(instr);
+
+ return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+ u16 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_thumb16(instr);
+
+ return fault;
+}
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
union offset_union uninitialized_var(offset);
- unsigned long instr = 0, instrptr;
- int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+ unsigned long instrptr;
+ int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
- unsigned int fault;
+ u32 instr = 0;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
+ int fault;
if (interrupts_enabled(regs))
local_irq_enable();
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
- fault = probe_kernel_address(ptr, tinstr);
- tinstr = __mem_to_opcode_thumb16(tinstr);
+
+ fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
- u16 tinst2 = 0;
- fault = probe_kernel_address(ptr + 1, tinst2);
- tinst2 = __mem_to_opcode_thumb16(tinst2);
+ u16 tinst2;
+ fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
}
}
} else {
- fault = probe_kernel_address((void *)instrptr, instr);
- instr = __mem_to_opcode_arm(instr);
+ fault = alignment_get_arm(regs, (void *)instrptr, &instr);
}
if (fault) {
* Oops, we didn't handle the instruction.
*/
pr_err("Alignment trap: not handling instruction "
- "%0*lx at [<%08lx>]\n",
+ "%0*x at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
ai_user += 1;
if (ai_usermode & UM_WARN)
- printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+ printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
isize << 1,
dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
- stmia sp, {r0-r3, r12}
cpsie i
svc #0
1: cpsid i
- ldr r0, =exc_ret
- orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
- str lr, [r0]
+ /* Calculate exc_ret */
+ orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
reg = <1>;
};
};
+
+&reg_dc1sw {
+ /*
+ * Ethernet PHY needs 30ms to properly power up and some more
+ * to initialize. 100ms should be plenty of time to finish the
+ * whole process.
+ */
+ regulator-enable-ramp-delay = <100000>;
+};
};
&reg_dc1sw {
+ /*
+ * Ethernet PHY needs 30ms to properly power up and some more
+ * to initialize. 100ms should be plenty of time to finish the
+ * whole process.
+ */
+ regulator-enable-ramp-delay = <100000>;
regulator-name = "vcc-phy";
};
clock-output-names = "ext-osc32k";
};
- pmu {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
- };
-
psci {
compatible = "arm,psci-0.2";
method = "smc";
pinmux: pinmux@14029c {
compatible = "pinctrl-single";
- reg = <0x0014029c 0x250>;
+ reg = <0x0014029c 0x26c>;
#address-cells = <1>;
#size-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xf>;
pinctrl-single,gpio-range = <
- &range 0 154 MODE_GPIO
+ &range 0 91 MODE_GPIO
+ &range 95 60 MODE_GPIO
>;
range: gpio-range {
#pinctrl-single,gpio-range-cells = <3>;
<&pinmux 108 16 27>,
<&pinmux 135 77 6>,
<&pinmux 141 67 4>,
- <&pinmux 145 149 6>,
- <&pinmux 151 91 4>;
+ <&pinmux 145 149 6>;
};
i2c1: i2c@e0000 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@1 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@100 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@101 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@200 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@201 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@300 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@301 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@400 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@401 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@500 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@501 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@600 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@601 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@700 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@701 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cluster0_l2: l2-cache0 {
cache-level = <2>;
};
- cpu_pw20: cpu-pw20 {
+ cpu_pw15: cpu-pw15 {
compatible = "arm,idle-state";
- idle-state-name = "PW20";
+ idle-state-name = "PW15";
arm,psci-suspend-param = <0x0>;
entry-latency-us = <2000>;
exit-latency-us = <2000>;
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b60000 0x10000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC3_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b60000 0x10000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC3_ROOT>;
clock-names = "ipg", "ahb", "per";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1000000>;
gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
- states = <1000000 0x0
- 900000 0x1>;
+ states = <1000000 0x1
+ 900000 0x0>;
regulator-always-on;
};
};
"fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_DUMMY>,
+ clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
<&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
<&clk IMX8MQ_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
"fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_DUMMY>,
+ clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
<&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
<&clk IMX8MQ_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
};
- usb3_phy: usb3-phy {
- compatible = "usb-nop-xceiv";
- vcc-supply = <&exp_usb3_vbus>;
- };
-
vsdc_reg: vsdc-reg {
compatible = "regulator-gpio";
regulator-name = "vsdc";
status = "okay";
};
+&comphy2 {
+ connector {
+ compatible = "usb-a-connector";
+ phy-supply = <&exp_usb3_vbus>;
+ };
+};
+
&usb3 {
status = "okay";
phys = <&comphy2 0>;
- usb-phy = <&usb3_phy>;
};
&mdio {
power-supply = <&pp3300_disp>;
panel-timing {
- clock-frequency = <266604720>;
+ clock-frequency = <266666667>;
hactive = <2400>;
hfront-porch = <48>;
hback-porch = <84>;
status = "okay";
u2phy0_host: host-port {
- phy-supply = <&vcc5v0_host>;
+ phy-supply = <&vcc5v0_typec>;
status = "okay";
};
&usbdrd_dwc3_0 {
status = "okay";
- dr_mode = "otg";
+ dr_mode = "host";
};
&usbdrd3_1 {
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
+ regulator-max-microvolt = <1700000>;
vin-supply = <&vcc5v0_sys>;
};
};
rk808: pmic@1b {
compatible = "rockchip,rk808";
reg = <0x1b>;
- interrupt-parent = <&gpio1>;
- interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
clock-output-names = "xin32k", "rk808-clkout2";
pinctrl-names = "default";
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
vsel1_gpio: vsel1-gpio {
&sdmmc {
bus-width = <4>;
- cap-mmc-highspeed;
cap-sd-highspeed;
cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
disable-wp;
&sdhci {
bus-width = <8>;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
+ mmc-hs200-1_8v;
non-removable;
status = "okay";
};
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
+#define BRCM_CPU_PART_BRAHMA_B53 0x100
#define BRCM_CPU_PART_VULCAN 0x516
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
+#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
{},
};
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
{ /* sentinel */ }
};
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
-
-static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
- MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
- MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ },
#endif
{},
};
-
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+ /* Cortex-A53 r0p[01234] */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ /* Brahma-B53 r0p[0] */
+ MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
+ {
+ /* Cortex-A53 r0p[01234] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ MIDR_FIXED(0x4, BIT(8)),
+ },
+ {
+ /* Brahma-B53 r0p[0] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ },
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
{
- /* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 843419",
.capability = ARM64_WORKAROUND_843419,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
- MIDR_FIXED(0x4, BIT(8)),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = erratum_843419_list,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
{
- /* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 845719",
.capability = ARM64_WORKAROUND_845719,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
{
.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = cpucap_multi_entry_cap_matches,
.match_list = qcom_erratum_1003_list,
},
{
.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
.capability = ARM64_WORKAROUND_REPEAT_TLBI,
- ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = arm64_repeat_tlbi_list,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
}
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
* Here we will start up CPU1 in the background and ask it to
* reconfigure itself then go back to sleep.
*/
- memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+ memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
__sync();
set_c0_cause(C_SW0);
cpumask_set_cpu(1, &bmips_booted_mask);
#endif
}
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
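
The array form matters because these symbols are defined in assembly, not as C objects: declared as plain char, the symbol name denotes a one-byte object and every use needs an explicit '&', which newer compilers can also flag as an out-of-bounds access. A minimal standalone sketch of the pattern, with hypothetical symbol names:

	#include <string.h>

	/* Symbols provided by assembly or a linker script have no real C
	 * object behind them; declaring them as incomplete arrays lets the
	 * name itself yield the address. (vec_start/vec_end are
	 * hypothetical.)
	 */
	extern char vec_start[];
	extern char vec_end[];

	static void copy_vector(void *dst)
	{
		/* no '&' needed, and no bogus one-byte object for the
		 * compiler to reason about */
		memcpy(dst, vec_start, vec_end - vec_start);
	}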
#define VDSO_HAS_CLOCK_GETRES 1
+#define __VDSO_USE_SYSCALL ULLONG_MAX
+
#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
static __always_inline long gettimeofday_fallback(
break;
#endif
default:
- cycle_now = 0;
+ cycle_now = __VDSO_USE_SYSCALL;
break;
}
static inline void bmips_nmi_handler_setup(void)
{
- bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
- &bmips_reset_nmi_vec_end);
- bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
- &bmips_smp_int_vec_end);
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+ bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+ bmips_smp_int_vec_end);
}
struct reset_vec_info {
int restore_scratch)
{
if (restore_scratch) {
+ /*
+ * Ensure the MFC0 below observes the value written to the
+ * KScratch register by the prior MTC0.
+ */
+ if (scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
uasm_i_mtc0(p, 0, C0_PAGEMASK);
uasm_il_b(p, r, lid);
}
- if (scratch_reg >= 0) {
- uasm_i_ehb(p);
+ if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
- } else {
+ else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- }
} else {
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
}
if (mode != not_refill && check_for_high_segbits) {
uasm_l_large_segbits_fault(l, *p);
+
+ if (mode == refill_scratch && scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/*
* We get here if we are an xsseg address, or if we are
* an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
uasm_i_jr(p, ptr);
if (mode == refill_scratch) {
- if (scratch_reg >= 0) {
- uasm_i_ehb(p);
+ if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
- } else {
+ else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- }
} else {
uasm_i_nop(p);
}
copy %rp, %r26
LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
ldo -8(%r25), %r25
- copy %r3, %arg2
+ ldo -FTRACE_FRAME_SIZE(%r1), %arg2
b,l ftrace_function_trampoline, %rp
copy %r1, %arg3 /* struct pt_regs */
struct kvmppc_xive *xive = dev->private;
struct kvmppc_xive_vcpu *xc;
int i, r = -EBUSY;
+ u32 vp_id;
pr_devel("connect_vcpu(cpu=%d)\n", cpu);
return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
- if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
- pr_devel("Duplicate !\n");
- return -EEXIST;
- }
if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
- xc = kzalloc(sizeof(*xc), GFP_KERNEL);
- if (!xc)
- return -ENOMEM;
/* We need to synchronize with queue provisioning */
mutex_lock(&xive->lock);
+
+ vp_id = kvmppc_xive_vp(xive, cpu);
+ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+ pr_devel("Duplicate !\n");
+ r = -EEXIST;
+ goto bail;
+ }
+
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc) {
+ r = -ENOMEM;
+ goto bail;
+ }
+
vcpu->arch.xive_vcpu = xc;
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = kvmppc_xive_vp(xive, cpu);
+ xc->vp_id = vp_id;
xc->mfrr = 0xff;
xc->valid = true;
return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+ return true;
+ }
+ return false;
+}
+
/*
* Mapping between guest priorities and host priorities
* is as follow.
struct kvmppc_xive *xive = dev->private;
struct kvmppc_xive_vcpu *xc = NULL;
int rc;
+ u32 vp_id;
pr_devel("native_connect_vcpu(server=%d)\n", server_num);
mutex_lock(&xive->lock);
- if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+ vp_id = kvmppc_xive_vp(xive, server_num);
+ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
pr_devel("Duplicate !\n");
rc = -EEXIST;
goto bail;
xc->vcpu = vcpu;
xc->server_num = server_num;
- xc->vp_id = kvmppc_xive_vp(xive, server_num);
+ xc->vp_id = vp_id;
xc->valid = true;
vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
#include <asm/asm.h>
-#ifdef CONFIG_GENERIC_BUG
#define __INSN_LENGTH_MASK _UL(0x3)
#define __INSN_LENGTH_32 _UL(0x3)
#define __COMPRESSED_INSN_MASK _UL(0xffff)
#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
-#ifndef __ASSEMBLY__
typedef u32 bug_insn_t;
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
RISCV_SHORT " %2"
#endif
+#ifdef CONFIG_GENERIC_BUG
#define __BUG_FLAGS(flags) \
do { \
__asm__ __volatile__ ( \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
-
-#endif /* !__ASSEMBLY__ */
#else /* CONFIG_GENERIC_BUG */
-#ifndef __ASSEMBLY__
#define __BUG_FLAGS(flags) do { \
__asm__ __volatile__ ("ebreak\n"); \
} while (0)
-#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_GENERIC_BUG */
#define BUG() do { \
#include <asm-generic/bug.h>
-#ifndef __ASSEMBLY__
-
struct pt_regs;
struct task_struct;
-extern void die(struct pt_regs *regs, const char *str);
-extern void do_trap(struct pt_regs *regs, int signo, int code,
- unsigned long addr);
-
-#endif /* !__ASSEMBLY__ */
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
#endif /* _ASM_RISCV_BUG_H */
#include <linux/types.h>
#include <asm/mmiowb.h>
+#include <asm/pgtable.h>
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
#endif
/*
+ * I/O port access constants.
+ */
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
+
+/*
* Emulation routines for the port-mapped IO space used by some PCI drivers.
* These are defined as being "fully synchronous", but also "not guaranteed to
* be fully ordered with respect to other memory and I/O operations". We're
#ifndef _ASM_RISCV_IRQ_H
#define _ASM_RISCV_IRQ_H
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
#define NR_IRQS 0
void riscv_timer_interrupt(void);
#define _ASM_RISCV_PGTABLE_H
#include <linux/mmzone.h>
+#include <linux/sizes.h>
#include <asm/pgtable-bits.h>
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define PCI_IO_SIZE SZ_16M
/*
* Roughly size the vmemmap space to be large enough to fit enough
#define vmemmap ((struct page *)VMEMMAP_START)
-#define FIXADDR_TOP (VMEMMAP_START)
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP PCI_IO_START
+
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}
-static inline pte_t mk_pte(struct page *page, pgprot_t prot)
-{
- return pfn_pte(page_to_pfn(page), prot);
-}
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr) (1) /* FIXME */
-#endif
extern void *dtb_early_va;
extern void setup_bootmem(void);
#ifndef _ASM_RISCV_SWITCH_TO_H
#define _ASM_RISCV_SWITCH_TO_H
+#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/hwcap.h>
#include <asm/smp.h>
+#include <asm/switch_to.h>
unsigned long elf_hwcap __read_mostly;
#ifdef CONFIG_FPU
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+void __init parse_dtb(void);
+
+#endif /* __ASM_HEAD_H */
return 0;
}
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleloader.h>
unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
{
* Copyright (C) 2017 SiFive
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/csr.h>
#include <asm/string.h>
#include <asm/switch_to.h>
+#include <asm/thread_info.h>
extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);
* Allows PTRACE_SYSCALL to work. These are called from entry.S in
* {handle,ret_from}_syscall.
*/
-void do_syscall_trace_enter(struct pt_regs *regs)
+__visible void do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (tracehook_report_syscall_entry(regs))
audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
}
-void do_syscall_trace_exit(struct pt_regs *regs)
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
{
audit_syscall_exit(regs);
*/
#include <linux/reboot.h>
+#include <linux/pm.h>
#include <asm/sbi.h>
static void default_power_off(void)
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
+#include "head.h"
+
#ifdef CONFIG_DUMMY_CONSOLE
struct screen_info screen_info = {
.orig_video_lines = 30,
#ifdef CONFIG_FPU
static long restore_fp_state(struct pt_regs *regs,
- union __riscv_fp_state *sc_fpregs)
+ union __riscv_fp_state __user *sc_fpregs)
{
long err;
struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
}
static long save_fp_state(struct pt_regs *regs,
- union __riscv_fp_state *sc_fpregs)
+ union __riscv_fp_state __user *sc_fpregs)
{
long err;
struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
* notification of userspace execution resumption
* - triggered by the _TIF_WORK_MASK flags
*/
-asmlinkage void do_notify_resume(struct pt_regs *regs,
- unsigned long thread_info_flags)
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags)
{
/* Handle pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
* Copyright (C) 2017 SiFive
*/
+#include <linux/cpu.h>
#include <linux/interrupt.h>
+#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
void *__cpu_up_stack_pointer[NR_CPUS];
void *__cpu_up_task_pointer[NR_CPUS];
/*
* C entry point for a secondary processor.
*/
-asmlinkage void __init smp_callin(void)
+asmlinkage __visible void __init smp_callin(void)
{
struct mm_struct *mm = &init_mm;
#include <linux/syscalls.h>
#include <asm-generic/syscalls.h>
#include <asm/vdso.h>
+#include <asm/syscall.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
+#include <asm/processor.h>
unsigned long riscv_timebase;
EXPORT_SYMBOL_GPL(riscv_timebase);
* Copyright (C) 2012 Regents of the University of California
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
}
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage void name(struct pt_regs *regs) \
+asmlinkage __visible void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
}
DO_ERROR_INFO(do_trap_ecall_m,
SIGILL, ILL_ILLTRP, "environment call from M-mode");
-#ifdef CONFIG_GENERIC_BUG
static inline unsigned long get_break_insn_length(unsigned long pc)
{
bug_insn_t insn;
return 0;
return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
}
-#endif /* CONFIG_GENERIC_BUG */
-asmlinkage void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
- if (user_mode(regs)) {
- force_sig_fault(SIGTRAP, TRAP_BRKPT,
- (void __user *)(regs->sepc));
- return;
- }
-#ifdef CONFIG_GENERIC_BUG
- {
- enum bug_trap_type type;
-
- type = report_bug(regs->sepc, regs);
- if (type == BUG_TRAP_TYPE_WARN) {
- regs->sepc += get_break_insn_length(regs->sepc);
- return;
- }
- }
-#endif /* CONFIG_GENERIC_BUG */
-
- die(regs, "Kernel BUG");
+ if (user_mode(regs))
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
+ else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
+ regs->sepc += get_break_insn_length(regs->sepc);
+ else
+ die(regs, "Kernel BUG");
}
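
The simplified do_trap_break() leans on get_break_insn_length(), which decodes the RISC-V instruction size from the two low opcode bits (11 means a 32-bit instruction, anything else a 16-bit compressed one). A standalone sketch of the decode, using the constants from the bug.h hunk above:

	#include <stdint.h>
	#include <stdio.h>

	#define __INSN_LENGTH_MASK  0x3UL
	#define __INSN_LENGTH_32    0x3UL
	#define __BUG_INSN_32       0x00100073UL  /* ebreak */
	#define __BUG_INSN_16       0x9002UL      /* c.ebreak */

	static unsigned long insn_length(unsigned long insn)
	{
		/* low two bits == 11 -> 32-bit insn, anything else -> 16-bit */
		return ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL;
	}

	int main(void)
	{
		printf("ebreak:   %lu bytes\n", insn_length(__BUG_INSN_32)); /* 4 */
		printf("c.ebreak: %lu bytes\n", insn_length(__BUG_INSN_16)); /* 2 */
		return 0;
	}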
#ifdef CONFIG_GENERIC_BUG
* Copyright (C) 2015 Regents of the University of California
*/
+#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
struct vdso_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
static int __init vdso_init(void)
{
#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
/*
* When necessary, performs a deferred icache flush for the given MM context,
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
+#include "../kernel/head.h"
+
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
#include <asm/pgtable.h>
#include <asm/io.h>
+#include "../kernel/head.h"
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
*/
#ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
- "not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
zone_sizes_init();
}
-#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
return IRQ_HANDLED;
}
-int __init sifive_l2_init(void)
+static int __init sifive_l2_init(void)
{
struct device_node *np;
struct resource res;
dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
for (rela = rela_start; rela < rela_end; rela++) {
loc = rela->r_offset + offset;
- val = rela->r_addend + offset;
+ val = rela->r_addend;
r_sym = ELF64_R_SYM(rela->r_info);
- if (r_sym)
- val += dynsym[r_sym].st_value;
+ if (r_sym) {
+ if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+ val += dynsym[r_sym].st_value + offset;
+ } else {
+ /*
+ * 0 == undefined symbol table index (STN_UNDEF),
+ * used for R_390_RELATIVE, only add KASLR offset
+ */
+ val += offset;
+ }
r_type = ELF64_R_TYPE(rela->r_info);
rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
if (rc)
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
+ case R_390_GLOB_DAT:
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
spin_unlock_irq(&ubd_dev->lock);
- if (ret < 0)
- blk_mq_requeue_request(req, true);
+ if (ret < 0) {
+ if (ret == -ENOMEM)
+ res = BLK_STS_RESOURCE;
+ else
+ res = BLK_STS_DEV_RESOURCE;
+ }
return res;
}
struct mem_vector immovable_mem[MAX_NUMNODES*2];
/*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
- acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
- char val[MAX_ADDR_LEN] = { };
- int ret;
-
- ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
- if (ret < 0)
- return 0;
-
- if (kstrtoull(val, 16, &addr))
- return 0;
-#endif
- return addr;
-}
-
-/*
* Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
* ACPI_TABLE_GUID are found, take the former, which has more features.
*/
}
#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
+ * digits, and '\0' for termination.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+ acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+ char val[MAX_ADDR_LEN] = { };
+ int ret;
+
+ ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+ if (ret < 0)
+ return 0;
+
+ if (kstrtoull(val, 16, &addr))
+ return 0;
+#endif
+ return addr;
+}
+
/* Compute SRAT address from RSDP. */
static unsigned long get_acpi_srat_table(void)
{
#include <asm/e820/types.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/boot.h>
#include "../string.h"
#include "eboot.h"
status = efi_relocate_kernel(sys_table, &bzimage_addr,
hdr->init_size, hdr->init_size,
hdr->pref_address,
- hdr->kernel_alignment);
+ hdr->kernel_alignment,
+ LOAD_PHYSICAL_ADDR);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "efi_relocate_kernel() failed!\n");
goto fail;
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ unsigned long needed_size;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ /*
+ * The memory hole needed for the kernel is the larger of either
+ * the entire decompressed kernel plus relocation table, or the
+ * entire decompressed kernel plus .bss and .brk sections.
+ *
+ * On X86_64, the memory is mapped with PMD pages. Round the
+ * size up so that the full extent of PMD pages mapped is
+ * included in the check against the valid memory table
+ * entries. This ensures the full mapped area is usable RAM
+ * and doesn't include any reserved areas.
+ */
+ needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+ needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
/* Report initial kernel position details. */
debug_putaddr(input_data);
debug_putaddr(input_len);
debug_putaddr(output);
debug_putaddr(output_len);
debug_putaddr(kernel_total_size);
+ debug_putaddr(needed_size);
#ifdef CONFIG_X86_64
/* Report address of 32-bit trampoline */
debug_putaddr(trampoline_32bit);
#endif
- /*
- * The memory hole needed for the kernel is the larger of either
- * the entire decompressed kernel plus relocation table, or the
- * entire decompressed kernel plus .bss and .brk sections.
- */
choose_random_location((unsigned long)input_data, input_len,
(unsigned long *)&output,
- max(output_len, kernel_total_size),
+ needed_size,
&virt_addr);
/* Validate memory location choices. */
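
The PMD rounding above uses the kernel's ALIGN() macro, which rounds up to the next power-of-two boundary. A standalone sketch of the arithmetic, assuming MIN_KERNEL_ALIGN is the 2 MiB PMD size:

	#include <stdio.h>

	/* mirrors the kernel macro for power-of-two alignment */
	#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long pmd = 1UL << 21;      /* 2 MiB PMD page */
		unsigned long needed = 23456789UL;  /* e.g. max(output_len, kernel_total_size) */

		/* rounds up to the next 2 MiB boundary: 25165824 (12 * 2 MiB) */
		printf("%lu\n", ALIGN(needed, pmd));
		return 0;
	}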
struct hw_perf_event *hwc, u64 config)
{
config &= ~perf_ibs->cnt_mask;
- wrmsrl(hwc->config_base, config);
+ if (boot_cpu_data.x86 == 0x10)
+ wrmsrl(hwc->config_base, config);
config &= ~perf_ibs->enable_mask;
wrmsrl(hwc->config_base, config);
}
},
.msr = MSR_AMD64_IBSOPCTL,
.config_mask = IBS_OP_CONFIG_MASK,
- .cnt_mask = IBS_OP_MAX_CNT,
+ .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+ IBS_OP_CUR_CNT_RAND,
.enable_mask = IBS_OP_ENABLE,
.valid_mask = IBS_OP_VAL,
.max_period = IBS_OP_MAX_CNT << 4,
if (event->attr.sample_type & PERF_SAMPLE_RAW)
offset_max = perf_ibs->offset_max;
else if (check_rip)
- offset_max = 2;
+ offset_max = 3;
else
offset_max = 1;
do {
* link as the 2nd entry in the table
*/
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
- TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p);
+ TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(&tp->topa, 1)->end = 1;
}
local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
uncore_enable_event(box, event);
- if (box->n_active == 1) {
- uncore_enable_box(box);
+ if (box->n_active == 1)
uncore_pmu_start_hrtimer(box);
- }
}
void uncore_pmu_event_stop(struct perf_event *event, int flags)
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
- if (box->n_active == 0) {
- uncore_disable_box(box);
+ if (box->n_active == 0)
uncore_pmu_cancel_hrtimer(box);
- }
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
return ret;
}
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+ struct intel_uncore_pmu *uncore_pmu;
+ struct intel_uncore_box *box;
+
+ uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+ if (!uncore_pmu)
+ return;
+
+ box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+ if (!box)
+ return;
+
+ if (uncore_pmu->type->ops->enable_box)
+ uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+ struct intel_uncore_pmu *uncore_pmu;
+ struct intel_uncore_box *box;
+
+ uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+ if (!uncore_pmu)
+ return;
+
+ box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+ if (!box)
+ return;
+
+ if (uncore_pmu->type->ops->disable_box)
+ uncore_pmu->type->ops->disable_box(box);
+}
+
static ssize_t uncore_get_attr_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
pmu->pmu = (struct pmu) {
.attr_groups = pmu->type->attr_groups,
.task_ctx_nr = perf_invalid_context,
+ .pmu_enable = uncore_pmu_enable,
+ .pmu_disable = uncore_pmu_disable,
.event_init = uncore_pmu_event_init,
.add = uncore_pmu_event_add,
.del = uncore_pmu_event_del,
return -EINVAL;
}
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
- if (box->pmu->type->ops->disable_box)
- box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
- if (box->pmu->type->ops->enable_box)
- box->pmu->type->ops->enable_box(box);
-}
-
static inline void uncore_disable_event(struct intel_uncore_box *box,
struct perf_event *event)
{
}
if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
- pr_info("Hyper-V: Using MSR based APIC access\n");
+ pr_info("Hyper-V: Using enlightened APIC (%s mode)",
+ x2apic_enabled() ? "x2apic" : "xapic");
+ /*
+ * With x2apic, architectural x2apic MSRs are equivalent to the
+ * respective synthetic MSRs, so there's no need to override
+ * the apic accessors. The only exception is
+ * hv_apic_eoi_write, because it benefits from lazy EOI when
+ * available, but it works for both xapic and x2apic modes.
+ */
apic_set_eoi_write(hv_apic_eoi_write);
- apic->read = hv_apic_read;
- apic->write = hv_apic_write;
- apic->icr_write = hv_apic_icr_write;
- apic->icr_read = hv_apic_icr_read;
+ if (!x2apic_enabled()) {
+ apic->read = hv_apic_read;
+ apic->write = hv_apic_write;
+ apic->icr_write = hv_apic_icr_write;
+ apic->icr_read = hv_apic_icr_read;
+ }
}
}
int (*set_nested_state)(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
struct kvm_nested_state *kvm_state);
- void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+ bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
+#include <linux/stringify.h>
/*
* The hypercall definitions differ in the low word of the %edx argument
*/
/* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT "0x5658"
-#define VMWARE_HYPERVISOR_PORT_HB "0x5659"
+#define VMWARE_HYPERVISOR_PORT 0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
/* Current vmcall / vmmcall version */
#define VMWARE_HYPERVISOR_HB BIT(0)
/* The low bandwidth call. The low word of edx is presumed clear. */
#define VMWARE_HYPERCALL \
- ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT ", %%dx; inl (%%dx)", \
+ ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT) ", %%dx; " \
+ "inl (%%dx), %%eax", \
"vmcall", X86_FEATURE_VMCALL, \
"vmmcall", X86_FEATURE_VMW_VMMCALL)
* HB and OUT bits set.
*/
#define VMWARE_HYPERCALL_HB_OUT \
- ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep outsb", \
+ ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+ "rep outsb", \
"vmcall", X86_FEATURE_VMCALL, \
"vmmcall", X86_FEATURE_VMW_VMMCALL)
* HB bit set.
*/
#define VMWARE_HYPERCALL_HB_IN \
- ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep insb", \
+ ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+ "rep insb", \
"vmcall", X86_FEATURE_VMCALL, \
"vmmcall", X86_FEATURE_VMW_VMMCALL)
#endif
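
The port constants become plain integers so C code can use them directly, while the asm templates rebuild the string form with __stringify(). Two macro levels are what force the argument to be expanded before it is stringified; a standalone sketch mirroring linux/stringify.h:

	#include <stdio.h>

	#define __stringify_1(x...) #x
	#define __stringify(x...)   __stringify_1(x)

	#define PORT 0x5658

	int main(void)
	{
		/* one level stringifies the macro name, two levels its value */
		printf("%s\n", __stringify_1(PORT)); /* "PORT"   */
		printf("%s\n", __stringify(PORT));   /* "0x5658" */
		return 0;
	}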
{
struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ if (cmsk)
+ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
return 0;
}
int hv_host_info_ecx;
int hv_host_info_edx;
+#ifdef CONFIG_PARAVIRT
+ pv_info.name = "Hyper-V";
+#endif
+
/*
* Extract the features and hints
*/
* we might write invalid pmds, when the kernel is relocated
* cleanup_highmap() fixes this up along with the mappings
* beyond _end.
+ *
+ * Only the region occupied by the kernel image has so far
+ * been checked against the table of usable memory regions
+ * provided by the firmware, so invalidate pages outside that
+ * region. A page table entry that maps to a reserved area of
+ * memory would allow processor speculation into that area,
+ * and on some hardware (particularly the UV platform) even
+ * speculative access to some reserved areas is caught as an
+ * error, causing the BIOS to halt the system.
*/
pmd = fixup_pointer(level2_kernel_pgt, physaddr);
- for (i = 0; i < PTRS_PER_PMD; i++) {
+
+ /* invalidate pages before the kernel image */
+ for (i = 0; i < pmd_index((unsigned long)_text); i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ /* fixup pages that are part of the kernel image */
+ for (; i <= pmd_index((unsigned long)_end); i++)
if (pmd[i] & _PAGE_PRESENT)
pmd[i] += load_delta;
- }
+
+ /* invalidate pages after the kernel image */
+ for (; i < PTRS_PER_PMD; i++)
+ pmd[i] &= ~_PAGE_PRESENT;
/*
* Fixup phys_base - remove the memory encryption mask to obtain
/* cpuid 7.0.ecx*/
const u32 kvm_cpuid_7_0_ecx_x86_features =
- F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+ F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
- return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
return apic->vcpu->vcpu_id;
return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
}
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+ return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
#endif
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
vcpu->arch.efer = efer;
- if (!npt_enabled && !(efer & EFER_LMA))
- efer &= ~EFER_LME;
+
+ if (!npt_enabled) {
+ /* Shadow paging assumes NX to be available. */
+ efer |= EFER_NX;
+
+ if (!(efer & EFER_LMA))
+ efer &= ~EFER_LME;
+ }
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
int ret = 0;
struct vcpu_svm *svm = to_svm(vcpu);
u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+ u32 id = kvm_xapic_id(vcpu->arch.apic);
if (ldr == svm->ldr_reg)
return 0;
avic_invalidate_logical_id_entry(vcpu);
if (ldr)
- ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+ ret = avic_ldr_write(vcpu, id, ldr);
if (!ret)
svm->ldr_reg = ldr;
{
u64 *old, *new;
struct vcpu_svm *svm = to_svm(vcpu);
- u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
- u32 id = (apic_id_reg >> 24) & 0xff;
+ u32 id = kvm_xapic_id(vcpu->arch.apic);
if (vcpu->vcpu_id == id)
return 0;
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx->nested.apic_access_page = NULL;
}
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
- /*
- * If translation failed, no matter: This feature asks
- * to exit when accessing the given address, and if it
- * can never be accessed, this feature won't do
- * anything anyway.
- */
if (!is_error_page(page)) {
vmx->nested.apic_access_page = page;
hpa = page_to_phys(vmx->nested.apic_access_page);
vmcs_write64(APIC_ACCESS_ADDR, hpa);
} else {
- secondary_exec_controls_clearbit(vmx,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+ __func__);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return false;
}
}
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+ return true;
}
/*
/*
* If from_vmentry is false, this is being called from state restore (either RSM
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
- *
- * Returns:
- *	0 - success, i.e. proceed with actual VMEnter
- *	1 - consistency check VMExit
- *	-1 - consistency check VMFail
+ *
+ * Returns:
+ * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
+ * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
+ * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
+ * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
*/
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
prepare_vmcs02_early(vmx, vmcs12);
if (from_vmentry) {
- nested_get_vmcs12_pages(vcpu);
+ if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+ return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
if (nested_vmx_check_vmentry_hw(vcpu)) {
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- return -1;
+ return NVMX_VMENTRY_VMFAIL;
}
if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
* returned as far as L1 is concerned. It will only return (and set
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
- return 0;
+ return NVMX_VMENTRY_SUCCESS;
/*
* A failed consistency check that leads to a VMExit during L1's
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
if (!from_vmentry)
- return 1;
+ return NVMX_VMENTRY_VMEXIT;
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
vmcs12->exit_qualification = exit_qual;
if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
vmx->nested.need_vmcs12_to_shadow_sync = true;
- return 1;
+ return NVMX_VMENTRY_VMEXIT;
}
/*
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
struct vmcs12 *vmcs12;
+ enum nvmx_vmentry_status status;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
- int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
* the nested entry.
*/
vmx->nested.nested_run_pending = 1;
- ret = nested_vmx_enter_non_root_mode(vcpu, true);
- vmx->nested.nested_run_pending = !ret;
- if (ret > 0)
- return 1;
- else if (ret)
- return nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ status = nested_vmx_enter_non_root_mode(vcpu, true);
+ if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+ goto vmentry_failed;
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
return kvm_vcpu_halt(vcpu);
}
return 1;
+
+vmentry_failed:
+ vmx->nested.nested_run_pending = 0;
+ if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+ return 0;
+ if (status == NVMX_VMENTRY_VMEXIT)
+ return 1;
+ WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
}
/*
#include "vmcs12.h"
#include "vmx.h"
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+ NVMX_VMENTRY_SUCCESS, /* Entered VMX non-root mode */
+ NVMX_VMENTRY_VMFAIL, /* Consistency check VMFail */
+ NVMX_VMENTRY_VMEXIT, /* Consistency check VMExit */
+ NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
bool apicv);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_vcpu_setup(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info, unsigned long exit_qualification);
u64 guest_efer = vmx->vcpu.arch.efer;
u64 ignore_bits = 0;
- if (!enable_ept) {
- /*
- * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
- * host CPUID is more efficient than testing guest CPUID
- * or CR4. Host SMEP is anyway a requirement for guest SMEP.
- */
- if (boot_cpu_has(X86_FEATURE_SMEP))
- guest_efer |= EFER_NX;
- else if (!(guest_efer & EFER_NX))
- ignore_bits |= EFER_NX;
- }
+ /* Shadow paging assumes NX to be available. */
+ if (!enable_ept)
+ guest_efer |= EFER_NX;
/*
* LMA and LME handled by hardware; SCE meaningless outside long mode.
return 1;
}
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
- kvm_skip_emulated_instruction(vcpu);
- WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
- vmcs_read32(VM_EXIT_REASON));
- return 1;
-}
-
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
[EXIT_REASON_INVVPID] = handle_vmx_instruction,
[EXIT_REASON_RDRAND] = handle_invalid_op,
[EXIT_REASON_RDSEED] = handle_invalid_op,
- [EXIT_REASON_XSAVES] = handle_unexpected_vmexit,
- [EXIT_REASON_XRSTORS] = handle_unexpected_vmexit,
[EXIT_REASON_PML_FULL] = handle_pml_full,
[EXIT_REASON_INVPCID] = handle_invpcid,
[EXIT_REASON_VMFUNC] = handle_vmx_instruction,
[EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
[EXIT_REASON_ENCLS] = handle_encls,
- [EXIT_REASON_UMWAIT] = handle_unexpected_vmexit,
- [EXIT_REASON_TPAUSE] = handle_unexpected_vmexit,
};
static const int kvm_vmx_max_exit_handlers =
asmlinkage __visible void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
- if (!kvm_rebooting)
- BUG();
+ BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.pv_time_enabled = false;
+ vcpu->arch.time = 0;
}
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
case MSR_KVM_SYSTEM_TIME: {
struct kvm_arch *ka = &vcpu->kvm->arch;
- kvmclock_reset(vcpu);
-
if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
+ vcpu->arch.pv_time_enabled = false;
if (!(data & 1))
break;
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
- vcpu->arch.pv_time_enabled = false;
- else
vcpu->arch.pv_time_enabled = true;
break;
bool req_immediate_exit = false;
if (kvm_request_pending(vcpu)) {
- if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
- kvm_x86_ops->get_vmcs12_pages(vcpu);
+ if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+ if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+ r = 0;
+ goto out;
+ }
+ }
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
- pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
+ pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_X86_64
+static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+ return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline bool invalid_probe_range(u64 vaddr)
+{
+ /*
+ * Range covering the highest possible canonical userspace address
+ * as well as non-canonical address range. For the canonical range
+ * we also need to include the userspace guard page.
+ */
+ return vaddr < TASK_SIZE_MAX + PAGE_SIZE ||
+ canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr;
+}
+#else
+static __always_inline bool invalid_probe_range(u64 vaddr)
+{
+ return vaddr < TASK_SIZE_MAX;
+}
+#endif
+
+long probe_kernel_read_strict(void *dst, const void *src, size_t size)
+{
+ if (unlikely(invalid_probe_range((unsigned long)src)))
+ return -EFAULT;
+
+ return __probe_kernel_read(dst, src, size);
+}
+
+long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count)
+{
+ if (unlikely(invalid_probe_range((unsigned long)unsafe_addr)))
+ return -EFAULT;
+
+ return __strncpy_from_unsafe(dst, unsafe_addr, count);
+}
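
canonical_address() relies on an arithmetic shift pair: shifting left by (64 - vaddr_bits) and back replicates bit (vaddr_bits - 1) into the upper bits, so a canonical address maps to itself and anything else fails the != vaddr test. A standalone demonstration, assuming 48 implemented virtual-address bits:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t canonical_address(uint64_t vaddr, uint8_t vaddr_bits)
	{
		/* arithmetic right shift sign-extends bit (vaddr_bits - 1) */
		return (uint64_t)(((int64_t)vaddr << (64 - vaddr_bits))
				  >> (64 - vaddr_bits));
	}

	int main(void)
	{
		/* bit 47 set: upper bits get replicated ->
		 * 0xffff800000000000, so 0x0000800000000000 is flagged
		 * as non-canonical */
		printf("%#llx\n", (unsigned long long)
		       canonical_address(0x0000800000000000ULL, 48));
		/* highest user-half address maps to itself */
		printf("%#llx\n", (unsigned long long)
		       canonical_address(0x00007fffffffffffULL, 48));
		return 0;
	}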
printk(KERN_INFO "Xen version: %d.%d%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+
+#ifdef CONFIG_X86_32
+ pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
+ "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
+ "from the Linux kernel!\n"
+ "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
+ "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
+#endif
}
static void __init xen_pv_init_platform(void)
goto einval;
}
- spin_lock_irq(&iocg->ioc->lock);
+ spin_lock(&iocg->ioc->lock);
iocg->cfg_weight = v;
weight_updated(iocg);
- spin_unlock_irq(&iocg->ioc->lock);
+ spin_unlock(&iocg->ioc->lock);
blkg_conf_finish(&ctx);
return nbytes;
nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
acpi_desc = to_acpi_desc(nd_desc);
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- int cpu = policy->cpu;
if (event == CPUFREQ_CREATE_POLICY) {
- acpi_thermal_cpufreq_init(cpu);
- acpi_processor_ppc_init(cpu);
+ acpi_thermal_cpufreq_init(policy);
+ acpi_processor_ppc_init(policy);
} else if (event == CPUFREQ_REMOVE_POLICY) {
- acpi_processor_ppc_exit(cpu);
- acpi_thermal_cpufreq_exit(cpu);
+ acpi_processor_ppc_exit(policy);
+ acpi_thermal_cpufreq_exit(policy);
}
return 0;
pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count ||
- unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+ unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = dev_pm_qos_update_request(&pr->perflib_req,
+ ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
ignore_ppc = 0;
}
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
- int ret;
+ unsigned int cpu;
- if (!pr)
- return;
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ if (!pr)
+ continue;
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0)
- pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
- ret);
+ ret = freq_qos_add_request(&policy->constraints,
+ &pr->perflib_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ cpu, ret);
+ }
}
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ unsigned int cpu;
- if (pr)
- dev_pm_qos_remove_request(&pr->perflib_req);
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (pr)
+ freq_qos_remove_request(&pr->perflib_req);
+ }
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
pr = per_cpu(processors, i);
- if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+ if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
policy = cpufreq_cpu_get(i);
cpufreq_cpu_put(policy);
- ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
if (ret < 0) {
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
pr->id, ret);
return 0;
}
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
- int ret;
+ unsigned int cpu;
- if (!pr)
- return;
-
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0)
- pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
- ret);
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ if (!pr)
+ continue;
+
+ ret = freq_qos_add_request(&policy->constraints,
+ &pr->thermal_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ cpu, ret);
+ }
}
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->related_cpus) {
+		struct acpi_processor *pr = per_cpu(processors, cpu);
- if (pr)
- dev_pm_qos_remove_request(&pr->thermal_req);
+ if (pr)
+ freq_qos_remove_request(&pr->thermal_req);
+ }
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
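
The replacement API attaches requests to a policy's constraints rather than to a device, so per-policy (and hence per-cluster) caps come for free. A kernel-context sketch of the request lifecycle, using only the freq_qos_* calls that appear in these hunks (demo_req and the demo_* functions are hypothetical):

	#include <linux/cpufreq.h>
	#include <linux/pm_qos.h>

	static struct freq_qos_request demo_req;	/* hypothetical */

	static int demo_attach(struct cpufreq_policy *policy)
	{
		/* INT_MAX: no effective cap until updated */
		return freq_qos_add_request(&policy->constraints, &demo_req,
					    FREQ_QOS_MAX, INT_MAX);
	}

	static void demo_cap(s32 max_khz)
	{
		if (freq_qos_request_active(&demo_req))
			freq_qos_update_request(&demo_req, max_khz);
	}

	static void demo_detach(void)
	{
		freq_qos_remove_request(&demo_req);
	}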
*/
rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
if (IS_ERR(rstc)) {
- if (PTR_ERR(rstc) != -EPROBE_DEFER)
- dev_err(&dev->dev, "Can't get amba reset!\n");
- return PTR_ERR(rstc);
+ ret = PTR_ERR(rstc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "can't get reset: %d\n",
+ ret);
+ goto err_reset;
}
reset_control_deassert(rstc);
reset_control_put(rstc);
release_resource(&dev->res);
err_out:
return ret;
+
+ err_reset:
+ amba_put_disable_pclk(dev);
+ iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
+ goto err_release;
}
/*
#define SZ_1K 0x400
#endif
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
if (proc->tsk != current->group_leader)
return -EINVAL;
- if ((vma->vm_end - vma->vm_start) > SZ_4M)
- vma->vm_end = vma->vm_start + SZ_4M;
-
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
__func__, proc->pid, vma->vm_start, vma->vm_end,
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
+#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
if (alloc->pages == NULL) {
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- alloc->buffer_size = vma->vm_end - vma->vm_start;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
{
int rc, i;
- if (hpriv->ahci_regulator) {
- rc = regulator_enable(hpriv->ahci_regulator);
- if (rc)
- return rc;
- }
+ rc = regulator_enable(hpriv->ahci_regulator);
+ if (rc)
+ return rc;
- if (hpriv->phy_regulator) {
- rc = regulator_enable(hpriv->phy_regulator);
- if (rc)
- goto disable_ahci_pwrs;
- }
+ rc = regulator_enable(hpriv->phy_regulator);
+ if (rc)
+ goto disable_ahci_pwrs;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->phy_regulator);
disable_ahci_pwrs:
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
regulator_disable(hpriv->target_pwrs[i]);
}
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
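
Dropping the NULL checks in these libahci_platform hunks is safe because plain regulator_get() (unlike regulator_get_optional()) hands back a dummy regulator when no supply is described, and enable/disable on the dummy are harmless no-ops. A kernel-context sketch of the resulting pattern (demo_power_on is hypothetical):

	#include <linux/regulator/consumer.h>

	static int demo_power_on(struct device *dev)
	{
		struct regulator *reg = devm_regulator_get(dev, "ahci");

		if (IS_ERR(reg))	/* only real errors, e.g. -EPROBE_DEFER */
			return PTR_ERR(reg);

		return regulator_enable(reg);	/* no-op on the dummy supply */
	}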
/**
struct regulator *target_pwr;
int rc = 0;
- target_pwr = regulator_get_optional(dev, "target");
+ target_pwr = regulator_get(dev, "target");
if (!IS_ERR(target_pwr))
hpriv->target_pwrs[port] = target_pwr;
hpriv->clks[i] = clk;
}
- hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+ hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
if (IS_ERR(hpriv->ahci_regulator)) {
rc = PTR_ERR(hpriv->ahci_regulator);
- if (rc == -EPROBE_DEFER)
+ if (rc != 0)
goto err_out;
- rc = 0;
- hpriv->ahci_regulator = NULL;
}
- hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+ hpriv->phy_regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(hpriv->phy_regulator)) {
rc = PTR_ERR(hpriv->phy_regulator);
if (rc == -EPROBE_DEFER)
spin_lock_irqsave(&dev->power.lock, flags);
- switch (type) {
- case DEV_PM_QOS_RESUME_LATENCY:
+ if (type == DEV_PM_QOS_RESUME_LATENCY) {
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
- break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->min_frequency);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->max_frequency);
- break;
- default:
+ } else {
WARN_ON(1);
ret = 0;
}
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = pm_qos_update_target(&qos->min_frequency,
- &req->data.pnode, action, value);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = pm_qos_update_target(&qos->max_frequency,
- &req->data.pnode, action, value);
- break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
- c = &qos->min_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MAX;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
- c = &qos->max_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MIN;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
memset(req, 0, sizeof(*req));
}
- c = &qos->min_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
- c = &qos->max_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
- case DEV_PM_QOS_MIN_FREQUENCY:
- case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
+ mutex_unlock(&cmd->lock);
goto done;
}
config = nbd->config;
- if (!mutex_trylock(&cmd->lock)) {
- nbd_config_put(nbd);
- return BLK_EH_RESET_TIMER;
- }
-
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
ret = -ENOENT;
goto out;
}
+ if (cmd->status != BLK_STS_OK) {
+ dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
req);
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ mutex_lock(&cmd->lock);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
+
blk_mq_complete_request(req);
return true;
}
return ret;
}
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+ int *err)
+{
+ struct socket *sock;
+
+ *err = 0;
+ sock = sockfd_lookup(fd, err);
+ if (!sock)
+ return NULL;
+
+ if (sock->ops->shutdown == sock_no_shutdown) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callback must be supported.\n");
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ return sock;
+}
+
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
bool netlink)
{
struct nbd_sock *nsock;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
int i;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
* @clk_disable_quirk: module specific clock disable quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
*/
struct sysc {
struct device *dev;
void (*clk_disable_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
+ void (*module_disable_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
+ if (ddata->module_disable_quirk)
+ ddata->module_disable_quirk(ddata);
+
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
SYSC_MODULE_QUIRK_SGX),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* Watchdog on am3 and am4 */
+ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable spr failed\n");
+ dev_warn(ddata->dev, "wdt disable step1 failed\n");
- sysc_write(ddata, wps, 0x5555);
+ sysc_write(ddata, spr, 0x5555);
error = readl_poll_timeout(ddata->module_va + wps, val,
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable wps failed\n");
+ dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+ ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
if (ret != 1) \
return -EINVAL; \
\
- ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+ ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \
}
goto err_free_real_cpus;
}
+ freq_constraints_init(&policy->constraints);
+
policy->nb_min.notifier_call = cpufreq_notifier_min;
policy->nb_max.notifier_call = cpufreq_notifier_max;
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
if (ret) {
dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
goto err_kobj_remove;
}
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
if (ret) {
dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
return policy;
err_min_qos_notifier:
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
err_kobj_remove:
cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
- struct device *dev = get_cpu_device(policy->cpu);
unsigned long flags;
int cpu;
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- dev_pm_qos_remove_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+
+ /* Cancel any pending policy->update work before freeing the policy. */
+ cancel_work_sync(&policy->update);
if (policy->max_freq_req) {
/*
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
- dev_pm_qos_remove_request(policy->max_freq_req);
+ freq_qos_remove_request(policy->max_freq_req);
}
- dev_pm_qos_remove_request(policy->min_freq_req);
+ freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
cpufreq_policy_put_kobj(policy);
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
if (new_policy) {
- struct device *dev = get_cpu_device(cpu);
-
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
if (!policy->min_freq_req)
goto out_destroy_policy;
- ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
- DEV_PM_QOS_MIN_FREQUENCY,
- policy->min);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->min_freq_req, FREQ_QOS_MIN,
+ policy->min);
if (ret < 0) {
/*
- * So we don't call dev_pm_qos_remove_request() for an
+ * So we don't call freq_qos_remove_request() for an
* uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL;
-
- dev_err(dev, "Failed to add min-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
/*
* This must be initialized right here to avoid calling
- * dev_pm_qos_remove_request() on uninitialized request in case
+ * freq_qos_remove_request() on uninitialized request in case
* of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
- ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->max);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->max_freq_req, FREQ_QOS_MAX,
+ policy->max);
if (ret < 0) {
policy->max_freq_req = NULL;
- dev_err(dev, "Failed to add max-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int ret;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
* The PM QoS framework collects all the requests from users and
* provides the final aggregated value here.
*/
- new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
- new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+ new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
break;
}
- ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
break;
}
static struct cpufreq_driver intel_pstate;
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
{
int max_state, turbo_max, freq, i, perf_pct;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpufreq_policy *policy;
for_each_possible_cpu(i) {
else
turbo_max = cpu->pstate.turbo_pstate;
- if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+ if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct;
} else {
req++;
freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
freq *= cpu->pstate.scaling;
- if (dev_pm_qos_update_request(req, freq) < 0)
+ if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i);
}
}
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+ update_qos_request(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+ update_qos_request(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int max_state, turbo_max, min_freq, max_freq, ret;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpudata *cpu;
struct device *dev;
max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
max_freq *= cpu->pstate.scaling;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
- min_freq);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+ min_freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_req;
}
- ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
+ ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+ max_freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
goto remove_min_req;
return 0;
remove_min_req:
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
free_req:
kfree(req);
pstate_exit:
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
req = policy->driver_data;
- dev_pm_qos_remove_request(req + 1);
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req + 1);
+ freq_qos_remove_request(req);
kfree(req);
return intel_pstate_cpu_exit(policy);
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
{
struct cpufreq_policy *policy;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
u8 node, slow_mode;
int cpu, ret;
req = policy->driver_data;
- ret = dev_pm_qos_update_request(req,
+ ret = freq_qos_update_request(req,
policy->freq_table[slow_mode].frequency);
if (ret < 0)
pr_warn("Failed to update freq constraint: %d\n", ret);
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
int ret;
if (!cbe_cpufreq_has_pmi)
if (!req)
return;
- ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+ policy->freq_table[0].frequency);
if (ret < 0) {
pr_err("Failed to add freq constraint (%d)\n", ret);
kfree(req);
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req = policy->driver_data;
+ struct freq_qos_request *req = policy->driver_data;
if (cbe_cpufreq_has_pmi) {
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
kfree(req);
}
}
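
The freq_qos_* conversions above move cpufreq away from per-device PM QoS
requests to constraints embedded in each policy. A minimal sketch of the new
request lifecycle, assuming a valid policy pointer (the demo_* names are
illustrative, not from the patch)::

    #include <linux/cpufreq.h>
    #include <linux/pm_qos.h>

    static struct freq_qos_request demo_req;

    /* Cap the policy's maximum frequency at 1.2 GHz (values are in kHz). */
    static int demo_add_cap(struct cpufreq_policy *policy)
    {
            return freq_qos_add_request(&policy->constraints, &demo_req,
                                        FREQ_QOS_MAX, 1200000);
    }

    /* Drop the cap again; only valid after a successful add. */
    static void demo_drop_cap(void)
    {
            freq_qos_remove_request(&demo_req);
    }

The aggregated FREQ_QOS_MIN/FREQ_QOS_MAX values are then read back with
freq_qos_read_value(), as the cpufreq_set_policy() hunk above does.
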
int ret;
struct cpuidle_driver *drv = &haltpoll_driver;
+ /* Do not load haltpoll if idle= is passed */
+ if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
cpuidle_poll_state_init(drv);
if (!kvm_para_available() ||
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+ inet_sk(sk)->inet_id = prandom_u32();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
return peekmsg(sk, msg, len, nonblock, flags);
if (sk_can_busy_loop(sk) &&
- skb_queue_empty(&sk->sk_receive_queue) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
sk_busy_loop(sk, nonblock);
if (!sdma->script_number)
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+ / sizeof(s32)) {
+ dev_err(sdma->dev,
+ "SDMA script number %d not match with firmware.\n",
+ sdma->script_number);
+ return;
+ }
+
for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
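
The added check guards the copy loop above: script_number comes from firmware,
so it must not exceed the number of s32 slots in struct
sdma_script_start_addrs. The same clamping idea in isolation, with
illustrative names::

    #include <linux/device.h>
    #include <linux/kernel.h>

    /* Reject a firmware-supplied count that exceeds a fixed table size. */
    static int demo_check_fw_count(struct device *dev, int fw_count,
                                   size_t table_entries)
    {
            if (fw_count < 0 || (size_t)fw_count > table_entries) {
                    dev_err(dev, "firmware count %d exceeds table size %zu\n",
                            fw_count, table_entries);
                    return -EINVAL;
            }
            return 0;
    }
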
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
+ /*
+ * If we have transactions queued, then some might be committed to the
+ * hardware in the desc fifo. The only way to reset the desc fifo is
+ * to do a hardware reset (either by pipe or the entire block).
+ * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+ * pipe. If the pipe is left disabled (default state after pipe reset)
+ * and is accessed by a connected hardware engine, a fatal error in
+ * the BAM will occur. There is a small window where this could happen
+ * with bam_chan_init_hw(), but it is assumed that the caller has
+ * stopped activity on any attached hardware engine. Make sure to do
+ * this first so that the BAM hardware doesn't cause memory corruption
+ * by accessing freed resources.
+ */
+ if (!list_empty(&bchan->desc_list)) {
+ async_desc = list_first_entry(&bchan->desc_list,
+ struct bam_async_desc, desc_node);
+ bam_chan_init_hw(bchan, async_desc->dir);
+ }
+
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT 28
+
/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
struct sprd_dma_chn channels[0];
};
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
.filter_fn = sprd_dma_filter_fn,
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
spin_lock_irqsave(&schan->vc.lock, flags);
+ if (schan->cur_desc)
+ cur_vd = &schan->cur_desc->vd;
+
sprd_dma_stop(schan);
spin_unlock_irqrestore(&schan->vc.lock, flags);
+ if (cur_vd)
+ sprd_dma_free_desc(cur_vd);
+
vchan_free_chan_resources(&schan->vc);
pm_runtime_put(chan->device->dev);
}
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
u32 temp, fix_mode = 0, fix_en = 0;
+ phys_addr_t llist_ptr;
if (dir == DMA_MEM_TO_DEV) {
src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
* Set the link-list pointer point to next link-list
* configuration's physical address.
*/
- hw->llist_ptr = schan->linklist.phy_addr + temp;
+ llist_ptr = schan->linklist.phy_addr + temp;
+ hw->llist_ptr = lower_32_bits(llist_ptr);
+ hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+ SPRD_DMA_LLIST_HIGH_MASK;
} else {
hw->llist_ptr = 0;
+ hw->src_blk_step = 0;
}
hw->frg_step = 0;
- hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
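
The controller's link-list pointer register holds only the low 32 bits of the
address, so the patch stashes the upper bits in the top nibble of
SRC_BLK_STEP. The split in isolation, reusing the masks defined earlier
(sketch only)::

    #include <linux/bits.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    #define SPRD_DMA_LLIST_HIGH_MASK	GENMASK(31, 28)
    #define SPRD_DMA_LLIST_HIGH_SHIFT	28

    /* Split a 64-bit link-list address into the two register fields. */
    static void demo_split_llist(phys_addr_t llist_ptr, u32 *lo, u32 *hi)
    {
            *lo = lower_32_bits(llist_ptr);
            *hi = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
                  SPRD_DMA_LLIST_HIGH_MASK;
    }
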
static int sprd_dma_terminate_all(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&schan->vc.lock, flags);
+ if (schan->cur_desc)
+ cur_vd = &schan->cur_desc->vd;
+
sprd_dma_stop(schan);
vchan_get_all_descriptors(&schan->vc, &head);
spin_unlock_irqrestore(&schan->vc.lock, flags);
+ if (cur_vd)
+ sprd_dma_free_desc(cur_vd);
+
vchan_dma_desc_free_list(&schan->vc, &head);
return 0;
}
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
#define ADMA_CH_FIFO_CTRL 0x2c
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: True if the DMA channel can have outstanding requests.
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_req_max: Maximum number of Tx or Rx channels available.
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
+ bool has_outstanding_reqs;
};
/*
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+ if (cdata->has_outstanding_reqs)
+ ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
+ .has_outstanding_reqs = false,
.ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0xf,
.ch_req_max = 10,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
+ .has_outstanding_reqs = true,
.ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct cppi41_dd *cdd = c->cdd;
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
+ return NULL;
+ }
+
+ if (cdd->is_suspended)
+ goto err_out_not_ready;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
d++;
}
- return &c->txd;
+ txd = &c->txd;
+
+err_out_not_ready:
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+ return txd;
}
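
The hunk above brackets descriptor setup with a runtime PM reference and bails
out early when the controller is marked suspended. The general shape of that
pairing, with a hypothetical op() standing in for the real work
(pm_runtime_get() is asynchronous, which is why the driver still checks its
own is_suspended flag)::

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int demo_with_runtime_pm(struct device *dev, int (*op)(void))
    {
            int ret = pm_runtime_get(dev);

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);	/* balance the failed get */
                    return ret;
            }

            ret = op();

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return ret;
    }
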
static void cppi41_compute_td_desc(struct cppi41_desc *d)
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
node);
hw = &segment->hw;
- xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+ xilinx_prep_dma_addr_t(hw->buf_addr));
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
chan->config.gen_lock = cfg->gen_lock;
chan->config.master = cfg->master;
+ dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
if (cfg->gen_lock && chan->genlock) {
dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
}
chan->config.delay = cfg->delay;
if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
chan->config.coalesc = cfg->coalesc;
}
if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
chan->config.delay = cfg->delay;
}
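
Each of these fixes clears a DMACR field before OR-ing in the new value;
without the clear, reconfiguring a running channel would leave stale bits from
the previous configuration. The read-modify-write pattern as an illustrative
helper::

    #include <linux/types.h>

    /* Install 'val' into the register field described by mask and shift. */
    static inline u32 demo_rmw_field(u32 reg, u32 mask, unsigned int shift,
                                     u32 val)
    {
            reg &= ~mask;			/* drop the stale field bits */
            reg |= (val << shift) & mask;	/* install the new value */
            return reg;
    }
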
if (!ghes_pvt)
return;
+ if (atomic_dec_return(&ghes_init))
+ return;
+
mci = ghes_pvt->mci;
+ ghes_pvt = NULL;
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
}
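
ghes_init now acts as a reference count, so the shared memory controller is
torn down only when the last GHES instance goes away, and ghes_pvt is cleared
to prevent use-after-free. The counting idiom reduced to its core (demo
names)::

    #include <linux/atomic.h>

    static atomic_t demo_refs = ATOMIC_INIT(0);

    /* Only the caller dropping the last reference performs the teardown. */
    static void demo_put(void (*teardown)(void))
    {
            if (atomic_dec_return(&demo_refs))
                    return;		/* other users remain */
            teardown();
    }
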
config EFI_RCI2_TABLE
bool "EFI Runtime Configuration Interface Table Version 2 Support"
+ depends on X86 || COMPILE_TEST
help
Displays the content of the Runtime Configuration Interface
Table version 2 on Dell EMC PowerEdge systems as a binary
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
- add_device_randomness(seed->bits, seed->size);
+ add_bootloader_randomness(seed->bits, seed->size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
+CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
unsigned long dram_base,
efi_loaded_image_t *image)
{
+ unsigned long kernel_base;
efi_status_t status;
/*
* loaded. These assumptions are made by the decompressor,
* before any memory map is available.
*/
- dram_base = round_up(dram_base, SZ_128M);
+ kernel_base = round_up(dram_base, SZ_128M);
- status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+ /*
+ * Note that some platforms (notably, the Raspberry Pi 2) put
+ * spin-tables and other pieces of firmware at the base of RAM,
+ * abusing the fact that the window of TEXT_OFFSET bytes at the
+ * base of the kernel image is only partially used at the moment.
+ * (Up to 5 pages are used for the swapper page tables)
+ */
+ kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+ status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
reserve_size);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
*image_size = image->image_size;
status = efi_relocate_kernel(sys_table, image_addr, *image_size,
*image_size,
- dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+ kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel.\n");
efi_free(sys_table, *reserve_size, *reserve_addr);
}
/*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
*/
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
start = desc->phys_addr;
end = start + desc->num_pages * EFI_PAGE_SIZE;
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- if (start == 0x0)
- start += 8;
+ if (start < min)
+ start = min;
start = round_up(start, align);
if ((start + size) > end)
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
- unsigned long alignment)
+ unsigned long alignment,
+ unsigned long min_addr)
{
unsigned long cur_image_addr;
unsigned long new_addr = 0;
* possible.
*/
if (status != EFI_SUCCESS) {
- status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
- &new_addr);
+ status = efi_low_alloc_above(sys_table_arg, alloc_size,
+ alignment, &new_addr, min_addr);
}
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
static int efi_test_open(struct inode *inode, struct file *file)
{
+ int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+ if (ret)
+ return ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
/*
* nothing special to do here
* We do accept multiple open files at the same time as we
if (tbl_size < 0) {
pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ ret = -EINVAL;
goto out_calc;
}
return 0;
error_free:
- while (i--) {
+ for (i = 0; i < last_entry; ++i) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+ amdgpu_bo_unref(&bo);
+ }
+ for (i = first_userptr; i < num_entries; ++i) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
amdgpu_bo_unref(&bo);
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- bool binding_userptr = false;
struct mm_struct *usermm;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
- binding_userptr = true;
}
if (p->evictable == lobj)
if (r)
return r;
- if (binding_userptr) {
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
+ kvfree(lobj->user_pages);
+ lobj->user_pages = NULL;
}
return 0;
}
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
- int r;
+ int r = 0;
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
return fence;
}
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = bp->resv,
- .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ .flags = bp->type != ttm_bo_type_kernel ?
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
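
This test helper (and the VCN and UVD v6/v7 variants further down) stops
pointing the feedback buffer at ib->gpu_addr + 1024 and instead allocates a
real, reserved BO whose GPU offset is written into the message. A condensed
sketch of the allocation half, assuming the driver-internal amdgpu.h header::

    #include "amdgpu.h"	/* driver-internal; illustrative */

    static int demo_alloc_msg_bo(struct amdgpu_device *adev,
                                 struct amdgpu_bo **bo, u64 *addr)
    {
            int r = amdgpu_bo_create_reserved(adev, 512, PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_VRAM,
                                              bo, NULL, NULL);
            if (r)
                    return r;

            *addr = amdgpu_bo_gpu_offset(*bo);
            return 0;
    }

The matching teardown is the amdgpu_bo_unreserve()/amdgpu_bo_unref() pair
added to the error paths above.
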
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
tmp = mmGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
tmp = mmGCVM_L2_CNTL4_DEFAULT;
job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
job->vm_needs_flush = true;
+ job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
tmp = mmMMVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
tmp = mmMMVM_L2_CNTL4_DEFAULT;
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
# It calculates Bandwidth and Watermarks values for HW programming
#
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
calcs_ccflags += -msse2
endif
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
// Allocate memory for the vm_helper
dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+ if (!dc->vm_helper) {
+ dm_error("%s: failed to create dc->vm_helper\n", __func__);
+ goto fail;
+ }
#endif
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ stream->link->link_enc->funcs->setup(
+ stream->link->link_enc,
+ pipe_ctx->stream->signal);
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
enum display_dongle_type *dongle = &sink_cap->dongle_type;
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
bool is_type2_dongle = false;
+ int retry_count = 2;
struct dp_hdmi_dongle_signature_data *dongle_signature;
/* Assume we have no valid DP passive dongle connected */
DP_HDMI_DONGLE_ADDRESS,
type2_dongle_buf,
sizeof(type2_dongle_buf))) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+ /* Passive HDMI dongles can sometimes fail here without retrying */
+ while (retry_count > 0) {
+ if (i2c_read(ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf)))
+ break;
+ retry_count--;
+ }
+ if (retry_count == 0) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- return;
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
}
/* Check if Type 2 dongle.*/
if (stream1->view_format != stream2->view_format)
return false;
+ if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+ return false;
+
return true;
}
static bool is_dp_and_hdmi_sharable(
if (!are_stream_backends_same(old_stream, stream))
return false;
+ if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+ return false;
+
return true;
}
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
DCN20 += dcn20_dsc.o
endif
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
.num_audio = 6,
.num_stream_encoder = 5,
.num_pll = 5,
- .num_dwb = 0,
+ .num_dwb = 1,
.num_ddc = 5,
};
DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
endif
# It provides the general basic services required by other DAL
# subcomponents.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
dml_ccflags += -msse2
endif
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
#
# Makefile for the 'dsc' sub-component of DAL.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
dsc_ccflags += -msse2
endif
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
- for (i = 0; i < podn_vdd_dep->count - 1; i++)
- od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
- if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+ for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
}
if (!in_range(&splitter->vsize, dflow->in_h)) {
- DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
- dflow->in_w);
+ DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+ dflow->in_h);
return -EINVAL;
}
etnaviv_cmdbuf_get_va(&submit->cmdbuf,
&gpu->mmu_context->cmdbuf_mapping));
+ mutex_unlock(&gpu->mmu_context->lock);
+
/* Reserve space for the bomap */
if (n_bomap_pages) {
bomap_start = bomap = iter.data;
obj->base.size);
}
- mutex_unlock(&gpu->mmu_context->lock);
-
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
buf += SZ_4K;
- for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
- if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+ buf += SZ_4K;
+ }
}
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
global->memory_base);
- if (ret) {
- global->ops->free(ctx);
- return NULL;
+ if (ret)
+ goto out_free;
+
+ if (global->version == ETNAVIV_IOMMU_V1 &&
+ ctx->cmdbuf_mapping.iova > 0x80000000) {
+ dev_err(global->dev,
+ "command buffer outside valid memory window\n");
+ goto out_unmap;
}
return ctx;
+
+out_unmap:
+ etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+ global->ops->free(ctx);
+ return NULL;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- bool pch_ssc_in_use = false;
bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
+ dev_priv->pch_ssc_use = 0;
+
if (spll_uses_pch_ssc(dev_priv)) {
DRM_DEBUG_KMS("SPLL using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (pch_ssc_in_use)
+ if (dev_priv->pch_ssc_use)
return;
if (has_fdi) {
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL(id));
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ enum intel_dpll_id id = pll->info->id;
u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
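
dev_priv->pch_ssc_use replaces the function-local pch_ssc_in_use flag with a
per-DPLL bitmask, so each disable path can tell whether the PLL it just shut
down depended on the PCH SSC reference and re-run intel_init_pch_refclk() if
so. The bookkeeping in miniature (demo names)::

    #include <linux/bits.h>

    static u8 demo_pch_ssc_use;

    /* Record that a DPLL depends on the PCH SSC reference clock. */
    static void demo_mark_user(unsigned int dpll_id)
    {
            demo_pch_ssc_use |= BIT(dpll_id);
    }

    /* On PLL disable: revisit the refclk setup if this PLL was a user. */
    static void demo_on_disable(unsigned int dpll_id, void (*reinit)(void))
    {
            if (demo_pch_ssc_use & BIT(dpll_id))
                    reinit();
    }
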
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
*/
DPLL_ID_ICL_MGPLL4 = 6,
/**
- * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
*/
DPLL_ID_TGL_MGPLL5 = 7,
/**
- * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
*/
DPLL_ID_TGL_MGPLL6 = 8,
};
struct work_struct idle_work;
} gem;
+ u8 pch_ssc_use;
+
/* For i945gm vblank irq vs. C3 workaround */
struct {
struct work_struct work;
return 0;
err_out2:
+ pm_runtime_disable(pfdev->dev);
panfrost_devfreq_fini(pfdev);
err_out1:
panfrost_device_fini(pfdev);
err_out0:
- pm_runtime_disable(pfdev->dev);
drm_dev_put(ddev);
return err;
}
return SZ_2M;
}
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
- struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ struct panfrost_mmu *mmu,
+ u64 iova, size_t size)
{
if (mmu->as < 0)
return;
spin_lock(&pfdev->as_lock);
list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
if (as == mmu->as)
- break;
+ goto found_mmu;
}
- if (as != mmu->as)
- goto out;
+ goto out;
+found_mmu:
priv = container_of(mmu, struct panfrost_file_priv, mmu);
spin_lock(&priv->mm_lock);
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ u64 addr)
{
int ret, i;
struct panfrost_gem_object *bo;
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
#define COUNTERS_PER_BLOCK 64
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
+#ifdef CONFIG_PPC64
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
+
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
+
+#ifdef CONFIG_PPC64
+ /* Some adapters need to be suspended before a
+ * shutdown occurs in order to prevent an error
+ * during kexec.
+ * Make this power specific because it breaks
+ * some non-power boards.
+ */
+ radeon_suspend_kms(ddev, true, true, false);
+#endif
}
static int radeon_pmops_suspend(struct device *dev)
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
+ struct dma_fence *fence;
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_set_error(&s_fence->finished, -ECANCELED);
dma_fence_put(s_job->s_fence->parent);
- s_job->s_fence->parent = sched->ops->run_job(s_job);
+ fence = sched->ops->run_job(s_job);
+
+ if (IS_ERR_OR_NULL(fence)) {
+ s_job->s_fence->parent = NULL;
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+ } else {
+ s_job->s_fence->parent = fence;
+ }
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
- if (fence) {
+ if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_process_job);
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
- } else
+ } else {
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
drm_sched_process_job(NULL, &sched_job->cb);
+ }
wake_up(&sched->job_scheduled);
}
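
With these changes a backend's run_job() may return a valid fence, NULL, or an
ERR_PTR, so both the resubmit path and the main loop test with
IS_ERR_OR_NULL() and feed failures into the finished fence. A minimal sketch
of the consuming side (demo names; dma_fence_set_error() expects a negative
errno, hence the explicit IS_ERR() case)::

    #include <linux/dma-fence.h>
    #include <linux/err.h>

    static void demo_consume_run_job(struct dma_fence *finished,
                                     struct dma_fence *fence)
    {
            if (IS_ERR(fence)) {
                    /* Hardware submission failed; propagate the errno. */
                    dma_fence_set_error(finished, PTR_ERR(fence));
            } else if (fence) {
                    dma_fence_get(fence);	/* normal path: keep a reference */
            }
            /* NULL means the backend completed the job without a fence. */
    }
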
if (args->bcl_start != args->bcl_end) {
bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin)
+ if (!bin) {
+ v3d_job_put(&render->base);
return -ENOMEM;
+ }
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
v3d_job_put(&render->base);
+ kfree(bin);
return ret;
}
{
struct axff_device *axff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
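
The same guard recurs in every force-feedback driver patched below (drff,
emsff, gaff, holtekff, lg2ff, lg3ff, lg4ff): a HID device can finish probing
with an empty hid->inputs list, so calling list_first_entry() unconditionally
dereferenced garbage. The pattern as an illustrative helper::

    #include <linux/hid.h>
    #include <linux/list.h>

    /* Return the first input device, or NULL when the list is empty. */
    static struct input_dev *demo_first_input(struct hid_device *hid)
    {
            struct hid_input *hidinput;

            if (list_empty(&hid->inputs))
                    return NULL;

            hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
            return hidinput->input;
    }
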
__u8 *start;
__u8 *buf;
__u8 *end;
+ __u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
- while ((start = fetch_item(start, end, &item)) != NULL) {
+ while ((next = fetch_item(start, end, &item)) != NULL) {
+ start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
}
}
- hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %u/%u\n",
+ size - (unsigned int)(end - start), size);
err:
kfree(parser->collection_stack);
alloc_err:
{
struct drff_device *drff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
{
struct emsff_device *emsff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
{
struct gaff_device *gaff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
{
struct holtekff_device *holtekff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;
#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b
#define USB_DEVICE_ID_GOOGLE_WAND 0x502d
#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
+#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
+#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)
int lg3ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;
int lg4ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
int lgff_init(struct hid_device* hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
#define HIDPP_FF_EFFECTID_NONE -1
#define HIDPP_FF_EFFECTID_AUTOCENTER -2
+#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18
#define HIDPP_FF_MAX_PARAMS 20
#define HIDPP_FF_RESERVED_SLOTS 1
static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
struct hidpp_ff_private_data *data = dev->ff->private;
- u8 params[18];
+ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];
dbg_hid("Setting autocenter to %d.\n", magnitude);
static void hidpp_ff_destroy(struct ff_device *ff)
{
struct hidpp_ff_private_data *data = ff->private;
+ struct hid_device *hid = data->hidpp->hid_dev;
+ hid_info(hid, "Unloading HID++ force feedback.\n");
+
+ device_remove_file(&hid->dev, &dev_attr_range);
+ destroy_workqueue(data->wq);
kfree(data->effect_ids);
}
-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+static int hidpp_ff_init(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
{
struct hid_device *hid = hidpp->hid_dev;
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
struct ff_device *ff;
- struct hidpp_report response;
- struct hidpp_ff_private_data *data;
- int error, j, num_slots;
+ int error, j, num_slots = data->num_effects;
u8 version;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (!dev) {
hid_err(hid, "Struct input_dev not set!\n");
return -EINVAL;
for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
- /* Read number of slots available in device */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_INFO, NULL, 0, &response);
- if (error) {
- if (error < 0)
- return error;
- hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
- __func__, error);
- return -EPROTO;
- }
-
- num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
-
error = input_ff_create(dev, num_slots);
if (error) {
hid_err(dev, "Failed to create FF device!\n");
return error;
}
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ /*
+	 * Create a copy of the passed data so that we can transfer memory
+	 * ownership to the FF core
+ */
+ data = kmemdup(data, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
}
data->hidpp = hidpp;
- data->feature_index = feature_index;
data->version = version;
- data->slot_autocenter = 0;
- data->num_effects = num_slots;
for (j = 0; j < num_slots; j++)
data->effect_ids[j] = -1;
ff->set_autocenter = hidpp_ff_set_autocenter;
ff->destroy = hidpp_ff_destroy;
-
- /* reset all forces */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_RESET_ALL, NULL, 0, &response);
-
- /* Read current Range */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_APERTURE, NULL, 0, &response);
- if (error)
- hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
- data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
-
/* Create sysfs interface */
error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
if (error)
hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
- /* Read the current gain values */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
- if (error)
- hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
- data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
- /* ignore boost value at response.fap.params[2] */
-
/* init the hardware command queue */
atomic_set(&data->workqueue_size, 0);
- /* initialize with zero autocenter to get wheel in usable state */
- hidpp_ff_set_autocenter(dev, 0);
-
hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
version);
return 0;
}
-static int hidpp_ff_deinit(struct hid_device *hid)
-{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
- struct hidpp_ff_private_data *data;
-
- if (!dev) {
- hid_err(hid, "Struct input_dev not found!\n");
- return -EINVAL;
- }
-
- hid_info(hid, "Unloading HID++ force feedback.\n");
- data = dev->ff->private;
- if (!data) {
- hid_err(hid, "Private data not found!\n");
- return -EINVAL;
- }
-
- destroy_workqueue(data->wq);
- device_remove_file(&hid->dev, &dev_attr_range);
-
- return 0;
-}
-
-
/* ************************************************************************** */
/* */
/* Device Support */
#define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123
-static int g920_get_config(struct hidpp_device *hidpp)
+static int g920_ff_set_autocenter(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
{
+ struct hidpp_report response;
+ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = {
+ [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART,
+ };
+ int ret;
+
+ /* initialize with zero autocenter to get wheel in usable state */
+
+ dbg_hid("Setting autocenter to 0.\n");
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_DOWNLOAD_EFFECT,
+ params, ARRAY_SIZE(params),
+ &response);
+ if (ret)
+ hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n");
+ else
+ data->slot_autocenter = response.fap.params[0];
+
+ return ret;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
+{
+ struct hidpp_report response;
u8 feature_type;
- u8 feature_index;
int ret;
+ memset(data, 0, sizeof(*data));
+
/* Find feature and store for later use */
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
- &feature_index, &feature_type);
+ &data->feature_index, &feature_type);
if (ret)
return ret;
- ret = hidpp_ff_init(hidpp, feature_index);
+ /* Read number of slots available in device */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_INFO,
+ NULL, 0,
+ &response);
+ if (ret) {
+ if (ret < 0)
+ return ret;
+ hid_err(hidpp->hid_dev,
+ "%s: received protocol error 0x%02x\n", __func__, ret);
+ return -EPROTO;
+ }
+
+ data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+ /* reset all forces */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_RESET_ALL,
+ NULL, 0,
+ &response);
if (ret)
- hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
- ret);
+ hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n");
- return 0;
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_APERTURE,
+ NULL, 0,
+ &response);
+ if (ret) {
+ hid_warn(hidpp->hid_dev,
+ "Failed to read range from device!\n");
+ }
+ data->range = ret ?
+ 900 : get_unaligned_be16(&response.fap.params[0]);
+
+ /* Read the current gain values */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_GLOBAL_GAINS,
+ NULL, 0,
+ &response);
+ if (ret)
+ hid_warn(hidpp->hid_dev,
+ "Failed to read gain values from device!\n");
+ data->gain = ret ?
+ 0xffff : get_unaligned_be16(&response.fap.params[0]);
+
+ /* ignore boost value at response.fap.params[2] */
+
+ return g920_ff_set_autocenter(hidpp, data);
}
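
The reshuffle above splits the old hidpp_ff_init() in two: g920_get_config() performs every device query into a caller-provided struct before the HID device is started, and hidpp_ff_init() later duplicates that struct with kmemdup() to hand ownership to the FF core. A sketch of that fill-then-duplicate pattern, with kmemdup() emulated via malloc+memcpy and illustrative names::

  #include <stdlib.h>
  #include <string.h>
  #include <stdio.h>

  struct ff_data { int feature_index, num_effects, range, gain; };

  static void *kmemdup(const void *src, size_t len)
  {
          void *p = malloc(len);

          return p ? memcpy(p, src, len) : NULL;
  }

  /* Pretend-hardware queries fill a stack-allocated config. */
  static int get_config(struct ff_data *data)
  {
          memset(data, 0, sizeof(*data));
          data->num_effects = 15;
          data->range = 900;
          data->gain = 0xffff;
          return 0;
  }

  int main(void)
  {
          struct ff_data tmp, *owned;

          if (get_config(&tmp))            /* probe fails: nothing to free */
                  return 1;
          owned = kmemdup(&tmp, sizeof(tmp));   /* ownership moves here */
          if (!owned)
                  return 1;
          printf("%d effects\n", owned->num_effects);
          free(owned);
          return 0;
  }
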
/* -------------------------------------------------------------------------- */
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_report(struct hid_device *hdev, int id,
- int expected_length, bool optional)
+static bool hidpp_validate_device(struct hid_device *hdev)
{
- int report_length;
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ int id, report_length, supported_reports = 0;
- if (id >= HID_MAX_IDS || id < 0) {
- hid_err(hdev, "invalid HID report id %u\n", id);
- return false;
+ id = REPORT_ID_HIDPP_SHORT;
+ report_length = hidpp_get_report_length(hdev, id);
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_SHORT_LENGTH)
+ goto bad_device;
+
+ supported_reports++;
}
+ id = REPORT_ID_HIDPP_LONG;
report_length = hidpp_get_report_length(hdev, id);
- if (!report_length)
- return optional;
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_LONG_LENGTH)
+ goto bad_device;
- if (report_length < expected_length) {
- hid_warn(hdev, "not enough values in hidpp report %d\n", id);
- return false;
+ supported_reports++;
}
- return true;
-}
+ id = REPORT_ID_HIDPP_VERY_LONG;
+ report_length = hidpp_get_report_length(hdev, id);
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_LONG_LENGTH ||
+ report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+ goto bad_device;
-static bool hidpp_validate_device(struct hid_device *hdev)
-{
- return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
- HIDPP_REPORT_SHORT_LENGTH, false) &&
- hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
- HIDPP_REPORT_LONG_LENGTH, true);
+ supported_reports++;
+ hidpp->very_long_report_length = report_length;
+ }
+
+ return supported_reports;
+
+bad_device:
+ hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+ return false;
}
static bool hidpp_application_equals(struct hid_device *hdev,
int ret;
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct hidpp_ff_private_data data;
/* report_fixup needs drvdata to be set before we call hid_parse */
hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
- hidpp->very_long_report_length =
- hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
- if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
- hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
-
if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
if (ret)
goto hid_hw_init_fail;
} else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
- ret = g920_get_config(hidpp);
+ ret = g920_get_config(hidpp, &data);
if (ret)
goto hid_hw_init_fail;
}
goto hid_hw_start_fail;
}
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+ ret = hidpp_ff_init(hidpp, &data);
+ if (ret)
+ hid_warn(hidpp->hid_dev,
+ "Unable to initialize force feedback support, errno %d\n",
+ ret);
+ }
+
return ret;
hid_hw_init_fail:
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
- hidpp_ff_deinit(hdev);
-
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
static int ms_init_ff(struct hid_device *hdev)
{
- struct hid_input *hidinput = list_entry(hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
struct ms_data *ms = hid_get_drvdata(hdev);
+ if (list_empty(&hdev->inputs)) {
+ hid_err(hdev, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
+
if (!(ms->quirks & MS_QUIRK_FF))
return 0;
MY PICTURES => KEY_WORDPROCESSOR
	   MY MUSIC => KEY_SPREADSHEET
*/
- unsigned int keys[] = {
+ static const unsigned int keys[] = {
KEY_FN,
KEY_MESSENGER, KEY_CALENDAR,
KEY_ADDRESSBOOK, KEY_DOCUMENTS,
0
};
- unsigned int *pkeys = &keys[0];
+ const unsigned int *pkeys = &keys[0];
unsigned short i;
	if (pm->ifnum != 1)	/* only set up ONCE for interface 1 */
static int sony_init_ff(struct sony_sc *sc)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
+
+ if (list_empty(&sc->hdev->inputs)) {
+ hid_err(sc->hdev, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
+
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;
{
struct zpff_device *zpff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int i, error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
-#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/err.h>
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
-#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
/* flags */
{ USB_VENDOR_ID_WEIDA, HID_ANY_ID,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
- I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
- I2C_HID_QUIRK_NO_RUNTIME_PM },
- { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
- I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
- { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
- I2C_HID_QUIRK_NO_RUNTIME_PM },
- { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
- I2C_HID_QUIRK_NO_RUNTIME_PM },
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
{ 0, 0 }
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
- unsigned long now, delay;
i2c_hid_dbg(ihid, "%s\n", __func__);
goto set_pwr_exit;
}
- if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
- power_state == I2C_HID_PWR_ON) {
- now = jiffies;
- if (time_after(ihid->sleep_delay, now)) {
- delay = jiffies_to_usecs(ihid->sleep_delay - now);
- usleep_range(delay, delay + 1);
- }
- }
-
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
- if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
- power_state == I2C_HID_PWR_SLEEP)
- ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
-
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
{
struct i2c_client *client = hid->driver_data;
struct i2c_hid *ihid = i2c_get_clientdata(client);
- int ret = 0;
-
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0)
- return ret;
set_bit(I2C_HID_STARTED, &ihid->flags);
return 0;
struct i2c_hid *ihid = i2c_get_clientdata(client);
clear_bit(I2C_HID_STARTED, &ihid->flags);
-
- /* Save some power */
- pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
- i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
- switch (lvl) {
- case PM_HINT_FULLON:
- pm_runtime_get_sync(&client->dev);
- break;
- case PM_HINT_NORMAL:
- pm_runtime_put(&client->dev);
- break;
- }
- return 0;
}
struct hid_ll_driver i2c_hid_ll_driver = {
.stop = i2c_hid_stop,
.open = i2c_hid_open,
.close = i2c_hid_close,
- .power = i2c_hid_power,
.output_report = i2c_hid_output_report,
.raw_request = i2c_hid_raw_request,
};
i2c_hid_acpi_fix_up_power(&client->dev);
- pm_runtime_get_noresume(&client->dev);
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
device_enable_async_suspend(&client->dev);
/* Make sure there is something at this address */
if (ret < 0) {
dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
ret = -ENXIO;
- goto err_pm;
+ goto err_regulator;
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
- goto err_pm;
+ goto err_regulator;
ret = i2c_hid_init_irq(client);
if (ret < 0)
- goto err_pm;
+ goto err_regulator;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
goto err_mem_free;
}
- if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
- pm_runtime_put(&client->dev);
-
return 0;
err_mem_free:
err_irq:
free_irq(client->irq, ihid);
-err_pm:
- pm_runtime_put_noidle(&client->dev);
- pm_runtime_disable(&client->dev);
-
err_regulator:
regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
ihid->pdata.supplies);
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
- if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
- pm_runtime_get_sync(&client->dev);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
-
hid = ihid->hid;
hid_destroy_device(hid);
int wake_status;
if (hid->driver && hid->driver->suspend) {
- /*
- * Wake up the device so that IO issues in
- * HID driver's suspend code can succeed.
- */
- ret = pm_runtime_resume(dev);
- if (ret < 0)
- return ret;
-
ret = hid->driver->suspend(hid, PMSG_SUSPEND);
if (ret < 0)
return ret;
}
- if (!pm_runtime_suspended(dev)) {
- /* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(client->irq);
- }
+ disable_irq(client->irq);
if (device_may_wakeup(&client->dev)) {
wake_status = enable_irq_wake(client->irq);
wake_status);
}
- /* We'll resume to full power */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
enable_irq(client->irq);
	/* Instead of resetting the device, simply power the device on. This
}
#endif
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(client->irq);
- return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- enable_irq(client->irq);
- i2c_hid_set_power(client, I2C_HID_PWR_ON);
- return 0;
-}
-#endif
-
static const struct dev_pm_ops i2c_hid_pm = {
SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
- SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
- NULL)
};
static const struct i2c_device_id i2c_hid_id_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ /*
+ * There are at least 2 Primebook C11B versions, the older
+ * version has a product-name of "Primebook C11B", and a
+ * bios version / release / firmware revision of:
+ * V2.1.2 / 05/03/2018 / 18.2
+ * The new version has "PRIMEBOOK C11B" as product-name and a
+ * bios version / release / firmware revision of:
+ * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+		 * Only the older version needs this quirk; note that the newer
+		 * version will not match, as it has a different product-name.
+ */
+ .ident = "Trekstor Primebook C11B",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Direkt-Tek DTLAPY116-2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
return 0;
out:
dev_err(&cl->device->dev, "error in allocating Tx pool\n");
- ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
return -ENOMEM;
}
#define ASPEED_I2CD_S_TX_CMD BIT(2)
#define ASPEED_I2CD_M_TX_CMD BIT(1)
#define ASPEED_I2CD_M_START_CMD BIT(0)
+#define ASPEED_I2CD_MASTER_CMDS_MASK \
+ (ASPEED_I2CD_M_STOP_CMD | \
+ ASPEED_I2CD_M_S_RX_CMD_LAST | \
+ ASPEED_I2CD_M_RX_CMD | \
+ ASPEED_I2CD_M_TX_CMD | \
+ ASPEED_I2CD_M_START_CMD)
/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
u8 slave_addr = i2c_8bit_addr_from_msg(msg);
- bus->master_state = ASPEED_I2C_MASTER_START;
-
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If it's requested in the middle of a slave session, set the master
* state to 'pending' then H/W will continue handling this master
* command when the bus comes back to the idle state.
*/
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+ if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
bus->master_state = ASPEED_I2C_MASTER_PENDING;
+ return;
+ }
#endif /* CONFIG_I2C_SLAVE */
+ bus->master_state = ASPEED_I2C_MASTER_START;
bus->buf_index = 0;
if (msg->flags & I2C_M_RD) {
}
}
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
- /*
- * A pending master command will be started by H/W when the bus comes
- * back to idle state after completing a slave operation so change the
- * master state from 'pending' to 'start' at here if slave is inactive.
- */
- if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
- goto out_no_complete;
-
- bus->master_state = ASPEED_I2C_MASTER_START;
- }
-#endif /* CONFIG_I2C_SLAVE */
-
/* Master is not currently active, irq was for someone else. */
if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
bus->master_state == ASPEED_I2C_MASTER_PENDING)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If a peer master starts a xfer immediately after it queues a
- * master command, change its state to 'pending' then H/W will
- * continue the queued master xfer just after completing the
- * slave mode session.
+ * master command, clear the queued master command and change
+	 * its state to 'pending'. To keep the handling of pending
+	 * cases simple, a S/W solution is used instead of the H/W
+	 * command queue handling.
*/
if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+ writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+ ~ASPEED_I2CD_MASTER_CMDS_MASK,
+ bus->base + ASPEED_I2C_CMD_REG);
bus->master_state = ASPEED_I2C_MASTER_PENDING;
dev_dbg(bus->dev,
"master goes pending due to a slave start\n");
irq_handled |= aspeed_i2c_master_irq(bus,
irq_remaining);
}
+
+ /*
+	 * Start a pending master command here if a slave operation has
+	 * completed.
+ */
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+ bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ aspeed_i2c_do_start(bus);
#else
irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */
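
The reworked flow parks a master command in the 'pending' state whenever a slave session owns the bus, and the interrupt path restarts it once the slave goes inactive. A toy state machine showing the hand-off, with plain variables instead of registers and without the driver's locking::

  #include <stdio.h>

  enum master_state { MASTER_INACTIVE, MASTER_PENDING, MASTER_START };
  enum slave_state  { SLAVE_INACTIVE, SLAVE_ACTIVE };

  struct bus { enum master_state master; enum slave_state slave; };

  static void do_start(struct bus *b)
  {
          if (b->slave != SLAVE_INACTIVE) {    /* park the command */
                  b->master = MASTER_PENDING;
                  return;
          }
          b->master = MASTER_START;
  }

  static void slave_done_irq(struct bus *b)
  {
          b->slave = SLAVE_INACTIVE;
          if (b->master == MASTER_PENDING)     /* resume parked command */
                  do_start(b);
  }

  int main(void)
  {
          struct bus b = { MASTER_INACTIVE, SLAVE_ACTIVE };

          do_start(&b);                /* queued mid-slave-session */
          printf("state=%d (pending)\n", b.master);
          slave_done_irq(&b);          /* slave finishes */
          printf("state=%d (started)\n", b.master);
          return 0;
  }
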
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ /*
+ * If timed out and the state is still pending, drop the pending
+ * master command.
+ */
+ spin_lock_irqsave(&bus->lock, flags);
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+ bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+ spin_unlock_irqrestore(&bus->lock, flags);
+
return -ETIMEDOUT;
}
static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
{
- if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+ if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
return I2C_FUNC_I2C |
(I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
else
struct regmap *regmap;
};
-/**
+/*
* All these values are coming from I2C Specification, Version 6.0, 4th of
* April 2014.
*
STM32F7_I2C_CR1_TXIE;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
+ /* Write 1st data byte */
+ writel_relaxed(value, base + STM32F7_I2C_TXDR);
} else {
/* Notify i2c slave that new write transfer is starting */
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
void __iomem *base = i2c_dev->base;
struct device *dev = i2c_dev->dev;
struct stm32_i2c_dma *dma = i2c_dev->dma;
- u32 mask, status;
+ u32 status;
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
f7_msg->result = -EINVAL;
}
- /* Disable interrupts */
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
- mask = STM32F7_I2C_XFER_IRQ_MASK;
- else
- mask = STM32F7_I2C_ALL_IRQ_MASK;
- stm32f7_i2c_disable_irq(i2c_dev, mask);
+ if (!i2c_dev->slave_running) {
+ u32 mask;
+ /* Disable interrupts */
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
+ else
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
+ }
/* Disable dma */
if (i2c_dev->use_dma) {
int ib_sa_init(void);
void ib_sa_cleanup(void);
+void rdma_nl_init(void);
void rdma_nl_exit(void);
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
goto err_comp_unbound;
}
+ rdma_nl_init();
+
ret = addr_init();
if (ret) {
pr_warn("Could't init IB address resolution\n");
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ struct ib_qp *qp;
unsigned long flags;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
+
switch (cm_id_priv->state) {
case IW_CM_STATE_LISTEN:
cm_id_priv->state = IW_CM_STATE_DESTROYING;
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* Abrupt close of the connection */
- (void)iwcm_modify_qp_err(cm_id_priv->qp);
+ (void)iwcm_modify_qp_err(qp);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_IDLE:
BUG();
break;
}
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
if (cm_id->mapped) {
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->ops.iw_rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
}
struct iwcm_id_private *cm_id_priv;
int ret;
unsigned long flags;
- struct ib_qp *qp;
+ struct ib_qp *qp = NULL;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
return 0; /* success */
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->ops.iw_rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
err:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
return ret;
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp = NULL;
unsigned long flags;
int ret;
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+ qp = cm_id_priv->qp;
cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
if (iw_event->private_data_len)
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp;
unsigned long flags;
- int ret = 0;
+ int ret = 0, notify_event = 0;
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
switch (cm_id_priv->state) {
case IW_CM_STATE_ESTABLISHED:
case IW_CM_STATE_CLOSING:
cm_id_priv->state = IW_CM_STATE_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ notify_event = 1;
break;
case IW_CM_STATE_DESTROYING:
break;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
+ if (notify_event)
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
return ret;
}
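
Every iwcm hunk above applies the same transformation: detach cm_id_priv->qp while holding the spinlock, then call iw_rem_ref() only after the lock has been dropped, since the release callback may block. A pthread-based sketch of that detach-under-lock pattern, where release_ref() stands in for ops.iw_rem_ref()::

  #include <pthread.h>
  #include <stdio.h>

  struct conn {
          pthread_mutex_t lock;
          void *qp;
  };

  static void release_ref(void *qp)
  {
          /* may sleep or take other locks - must run unlocked */
          printf("released %p\n", qp);
  }

  static void teardown(struct conn *c)
  {
          void *qp;

          pthread_mutex_lock(&c->lock);
          qp = c->qp;                  /* detach under the lock */
          c->qp = NULL;
          pthread_mutex_unlock(&c->lock);

          if (qp)
                  release_ref(qp);     /* heavy work outside the lock */
  }

  int main(void)
  {
          static int dummy;
          struct conn c = { PTHREAD_MUTEX_INITIALIZER, &dummy };

          teardown(&c);
          return 0;
  }
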
#include <linux/module.h>
#include "core_priv.h"
-static DEFINE_MUTEX(rdma_nl_mutex);
static struct {
- const struct rdma_nl_cbs *cb_table;
+ const struct rdma_nl_cbs *cb_table;
+	/* Synchronizes ongoing netlink commands with netlink client
+	 * unregistration.
+ */
+ struct rw_semaphore sem;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
bool rdma_nl_chk_listeners(unsigned int group)
return (op < max_num_ops[type]) ? true : false;
}
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
const struct rdma_nl_cbs *cb_table;
- if (!is_nl_msg_valid(type, op))
- return false;
-
/*
* Currently only NLDEV client is supporting netlink commands in
* non init_net net namespace.
*/
if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
- return false;
+ return NULL;
- if (!rdma_nl_types[type].cb_table) {
- mutex_unlock(&rdma_nl_mutex);
- request_module("rdma-netlink-subsys-%d", type);
- mutex_lock(&rdma_nl_mutex);
- }
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ if (!cb_table) {
+ /*
+		 * Didn't get a valid reference to the table, so attempt to
+		 * load the module once.
+ */
+ up_read(&rdma_nl_types[type].sem);
- cb_table = rdma_nl_types[type].cb_table;
+ request_module("rdma-netlink-subsys-%d", type);
+ down_read(&rdma_nl_types[type].sem);
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ }
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
- return false;
- return true;
+ return NULL;
+ return cb_table;
}
void rdma_nl_register(unsigned int index,
const struct rdma_nl_cbs cb_table[])
{
- mutex_lock(&rdma_nl_mutex);
- if (!is_nl_msg_valid(index, 0)) {
- /*
- * All clients are not interesting in success/failure of
- * this call. They want to see the print to error log and
- * continue their initialization. Print warning for them,
- * because it is programmer's error to be here.
- */
- mutex_unlock(&rdma_nl_mutex);
- WARN(true,
- "The not-valid %u index was supplied to RDMA netlink\n",
- index);
+ if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+ WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
return;
- }
-
- if (rdma_nl_types[index].cb_table) {
- mutex_unlock(&rdma_nl_mutex);
- WARN(true,
- "The %u index is already registered in RDMA netlink\n",
- index);
- return;
- }
- rdma_nl_types[index].cb_table = cb_table;
- mutex_unlock(&rdma_nl_mutex);
+	/* Pairs with the READ_ONCE in get_cb_table() */
+ smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
}
EXPORT_SYMBOL(rdma_nl_register);
void rdma_nl_unregister(unsigned int index)
{
- mutex_lock(&rdma_nl_mutex);
+ down_write(&rdma_nl_types[index].sem);
rdma_nl_types[index].cb_table = NULL;
- mutex_unlock(&rdma_nl_mutex);
+ up_write(&rdma_nl_types[index].sem);
}
EXPORT_SYMBOL(rdma_nl_unregister);
unsigned int index = RDMA_NL_GET_CLIENT(type);
unsigned int op = RDMA_NL_GET_OP(type);
const struct rdma_nl_cbs *cb_table;
+ int err = -EINVAL;
- if (!is_nl_valid(skb, index, op))
+ if (!is_nl_msg_valid(index, op))
return -EINVAL;
- cb_table = rdma_nl_types[index].cb_table;
+ down_read(&rdma_nl_types[index].sem);
+ cb_table = get_cb_table(skb, index, op);
+ if (!cb_table)
+ goto done;
if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
- !netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
+ !netlink_capable(skb, CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
/*
* LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
*/
if (index == RDMA_NL_LS) {
if (cb_table[op].doit)
- return cb_table[op].doit(skb, nlh, extack);
- return -EINVAL;
+ err = cb_table[op].doit(skb, nlh, extack);
+ goto done;
}
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
.dump = cb_table[op].dump,
};
if (c.dump)
- return netlink_dump_start(skb->sk, skb, nlh, &c);
- return -EINVAL;
+ err = netlink_dump_start(skb->sk, skb, nlh, &c);
+ goto done;
}
if (cb_table[op].doit)
- return cb_table[op].doit(skb, nlh, extack);
-
- return 0;
+ err = cb_table[op].doit(skb, nlh, extack);
+done:
+ up_read(&rdma_nl_types[index].sem);
+ return err;
}
/*
static void rdma_nl_rcv(struct sk_buff *skb)
{
- mutex_lock(&rdma_nl_mutex);
rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
- mutex_unlock(&rdma_nl_mutex);
}
int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
}
EXPORT_SYMBOL(rdma_nl_multicast);
+void rdma_nl_init(void)
+{
+ int idx;
+
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ init_rwsem(&rdma_nl_types[idx].sem);
+}
+
void rdma_nl_exit(void)
{
int idx;
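
The new scheme gives each netlink client its own rw_semaphore: handlers run under the read side, unregistration takes the write side so it waits out in-flight handlers, and the table pointer itself is published with smp_store_release()/READ_ONCE(). A userspace sketch of the same idea with a POSIX rwlock::

  #include <pthread.h>
  #include <stdio.h>

  struct cbs { void (*doit)(void); };

  struct client {
          pthread_rwlock_t sem;
          const struct cbs *cb_table;
  };

  static void hello(void) { puts("handled"); }
  static const struct cbs table[] = { { hello } };

  static void handle_msg(struct client *c)
  {
          pthread_rwlock_rdlock(&c->sem);
          if (c->cb_table && c->cb_table[0].doit)
                  c->cb_table[0].doit();
          pthread_rwlock_unlock(&c->sem);
  }

  static void unregister_client(struct client *c)
  {
          pthread_rwlock_wrlock(&c->sem);  /* waits for running handlers */
          c->cb_table = NULL;
          pthread_rwlock_unlock(&c->sem);
  }

  int main(void)
  {
          struct client c = { PTHREAD_RWLOCK_INITIALIZER, table };

          handle_msg(&c);
          unregister_client(&c);
          handle_msg(&c);                  /* now a clean no-op */
          return 0;
  }
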
container_of(res, struct rdma_counter, res);
if (port && port != counter->port)
- return 0;
+ return -EAGAIN;
/* Dump it even query failed */
rdma_counter_query_stats(counter);
struct ib_uverbs_device {
atomic_t refcount;
- int num_comp_vectors;
+ u32 num_comp_vectors;
struct completion comp;
struct device dev;
/* First group for device attributes, NULL terminated array */
void *context)
{
struct find_gid_index_context *ctx = context;
+ u16 vlan_id = 0xffff;
+ int ret;
if (ctx->gid_type != gid_attr->gid_type)
return false;
- if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
- (is_vlan_dev(gid_attr->ndev) &&
- vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+ if (ret)
return false;
- return true;
+ return ctx->vlan_id == vlan_id;
}
static const struct ib_gid_attr *
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
- skb_get(skb);
- rpl = cplhdr(skb);
- if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps && req->tcpopt.tstamp,
(ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
}
+
+ skb_get(skb);
+ rpl = cplhdr(skb);
+ if (!is_t4(adapter_type)) {
+ skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ rpl5 = (void *)rpl;
+ INIT_TP_WR(rpl5, ep->hwtid);
+ } else {
+ skb_trim(skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ }
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+
if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
struct sdma_engine *sde;
if (dd->sdma_pad_dma) {
- dma_free_coherent(&dd->pcidev->dev, 4,
+ dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
(void *)dd->sdma_pad_dma,
dd->sdma_pad_phys);
dd->sdma_pad_dma = NULL;
}
/* Allocate memory for pad */
- dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
&dd->sdma_pad_phys, GFP_KERNEL);
if (!dd->sdma_pad_dma) {
dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
if (diff > 0) {
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
- restart_tid_rdma_read_req(rcd,
- qp,
- wqe);
-
/* Drop the packet.*/
goto s_unlock;
} else if (diff < 0) {
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
- ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
- (void *)trail_buf, extra_bytes);
+ ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+ sde->dd->sdma_pad_phys, extra_bytes);
bail_txadd:
return ret;
}
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
- seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+ seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+ extra_bytes);
seg_pio_copy_end(pbuf);
}
return;
}
- if (eq->buf_list)
- dma_free_coherent(hr_dev->dev, buf_chk_sz,
- eq->buf_list->buf, eq->buf_list->map);
+ dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ kfree(eq->buf_list);
}
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mmw->mmkey.key));
+ xa_erase_irq(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(mmw->mmkey.key));
/*
* pagefault_single_data_segment() may be accessing mmw under
* SRCU if the user bound an ODP MR to this MW.
}
/* Only remove the old rate after new rate was set */
- if ((old_rl.rate &&
- !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
- (new_state != MLX5_SQC_STATE_RDY))
+ if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+ (new_state != MLX5_SQC_STATE_RDY)) {
mlx5_rl_remove_rate(dev, &old_rl);
+ if (new_state != MLX5_SQC_STATE_RDY)
+ memset(&new_rl, 0, sizeof(new_rl));
+ }
ibqp->rl = new_rl;
sq->state = new_state;
struct qedr_dev *qedr = get_qedr_dev(ibdev);
u32 fw_ver = (u32)qedr->attr.fw_ver;
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
(fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
void siw_free_qp(struct kref *ref)
{
struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+ struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
struct siw_device *sdev = qp->sdev;
unsigned long flags;
atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
+ kfree(siw_base_qp);
}
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
struct siw_qp *qp = to_siw_qp(base_qp);
- struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
qp->scq = qp->rcq = NULL;
siw_qp_put(qp);
- kfree(siw_base_qp);
return 0;
}
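
The siw change moves the kfree() of the containing object from the destroy path into the kref release function, so the memory lives until the last reference is dropped. A sketch of that lifetime rule, using a simplified kref built on C11 atomics rather than the kernel's <linux/kref.h>::

  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct qp { atomic_int ref; };

  static struct qp *qp_new(void)
  {
          struct qp *qp = malloc(sizeof(*qp));

          atomic_init(&qp->ref, 1);
          return qp;
  }

  static void qp_get(struct qp *qp) { atomic_fetch_add(&qp->ref, 1); }

  static void qp_put(struct qp *qp)
  {
          /* fetch_sub returns the old value: 1 means we dropped to 0 */
          if (atomic_fetch_sub(&qp->ref, 1) == 1) {
                  puts("last ref dropped: freeing");  /* like siw_free_qp() */
                  free(qp);
          }
  }

  int main(void)
  {
          struct qp *qp = qp_new();

          qp_get(qp);   /* e.g. an in-flight work item */
          qp_put(qp);   /* destroy path: memory must survive ... */
          qp_put(qp);   /* ... until the final put */
          return 0;
  }
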
for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
finger[i].is_valid = buf[i + y] >> 7;
if (finger[i].is_valid) {
- finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
- finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+ finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+ buf[i + y + 1];
+ finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+ buf[i + y + 2];
/* st1232 includes a z-axis / touch strength */
if (ts->chip_info->have_z)
.driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
},
{
+ /*
+ * Acer Aspire A315-41 requires the very same workaround as
+ * Dell Latitude 5495
+ */
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Acer Aspire A315-41",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+ },
+ {
.callback = ivrs_ioapic_quirk_cb,
.ident = "Lenovo ideapad 330S-15ARR",
.matches = {
struct device_domain_info *info;
info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+ if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
return 0;
if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
dma_mask = dev->coherent_dma_mask;
- if (dma_mask >= dma_get_required_mask(dev))
+ if (dma_mask >= dma_direct_get_required_mask(dev))
return false;
/*
return nelems;
}
+static u64 intel_get_required_mask(struct device *dev)
+{
+ if (!iommu_need_mapping(dev))
+ return dma_direct_get_required_mask(dev);
+ return DMA_BIT_MASK(32);
+}
+
static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
.dma_supported = dma_direct_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .get_required_mask = intel_get_required_mask,
};
static void
/* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) {
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ found\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
dev_name(&pdev->dev), mmu);
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
+#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
chained_irq_exit(irqchip, desc);
}
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+
+ writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+ return 1;
+}
+
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
};
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS 50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static u16 get_its_list(struct its_vm *vm)
+{
+ struct its_node *its;
+ unsigned long its_list = 0;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (vm->vlpi_count[its->list_nr])
+ __set_bit(its->list_nr, &its_list);
+ }
+
+ return (u16)its_list;
+}
+
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
{
static void its_send_vmovp(struct its_vpe *vpe)
{
- struct its_cmd_desc desc;
+ struct its_cmd_desc desc = {};
struct its_node *its;
unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
- desc.its_vmovp_cmd.its_list = (u16)its_list_map;
if (!its_list_map) {
its = list_first_entry(&its_nodes, struct its_node, entry);
- desc.its_vmovp_cmd.seq_num = 0;
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+ desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
/* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) {
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*
}
}
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
if (cpu >= nr_cpu_ids)
return -EINVAL;
- if (!irqd_irq_disabled(d)) {
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
- }
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
}
#endif
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
- /*
- * There is no need to mask/unmask PLIC interrupts. They are "masked"
- * by reading claim and "unmasked" when writing it back.
- */
- .irq_enable = plic_irq_enable,
- .irq_disable = plic_irq_disable,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+ .irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
irq_set_chip_data(irq, NULL);
irq_set_noprobe(irq);
return 0;
hwirq);
else
generic_handle_irq(irq);
- writel(hwirq, claim);
}
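
Writing the claim register back is now the irq_eoi callback, which the fasteoi flow invokes exactly once after the handler, instead of being buried in the chained-handler loop. A toy model of the PLIC claim/complete protocol this relies on, with registers simulated by plain variables::

  #include <stdio.h>

  static unsigned int pending = 5;      /* one pending hwirq */

  /* Reading claim returns the highest pending hwirq and implicitly
   * masks it at the PLIC until it is completed. */
  static unsigned int claim_read(void)
  {
          unsigned int hwirq = pending;

          pending = 0;
          return hwirq;
  }

  /* Writing the hwirq back completes it and re-enables the source;
   * this is what plic_irq_eoi() does. */
  static void claim_write(unsigned int hwirq)
  {
          printf("eoi hwirq %u, source re-enabled\n", hwirq);
  }

  static void handle_fasteoi(unsigned int hwirq)
  {
          printf("handling hwirq %u\n", hwirq);
          claim_write(hwirq);          /* eoi after the handler, once */
  }

  int main(void)
  {
          unsigned int hwirq;

          while ((hwirq = claim_read()))
                  handle_fasteoi(hwirq);
          return 0;
  }
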
csr_set(sie, SIE_SEIE);
}
continue;
}
- /* skip context holes */
- if (parent.args[0] == -1)
+ /* skip contexts other than supervisor external interrupt */
+ if (parent.args[0] != IRQ_S_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
poll_wait(file, &(cdev->recvwait), wait);
mask = EPOLLOUT | EPOLLWRNORM;
- if (!skb_queue_empty(&cdev->recvqueue))
+ if (!skb_queue_empty_lockless(&cdev->recvqueue))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
#define DEBUG_HW_FIRMWARE_FIFO 0x10000
-static const u8 faxmodulation_s[] = "3,24,48,72,73,74,96,97,98,121,122,145,146";
static const u8 faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121,
122, 145, 146};
#define FAXMODCNT 13
static int clamped;
static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
static unsigned int min_freq, max_freq;
static int clamp_set(struct wf_control *ct, s32 value)
}
clamped = value;
- return dev_pm_qos_update_request(&qos_req, freq);
+ return freq_qos_update_request(&qos_req, freq);
}
static int clamp_get(struct wf_control *ct, s32 *value)
min_freq = policy->cpuinfo.min_freq;
max_freq = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+ max_freq);
+
cpufreq_cpu_put(policy);
+ if (ret < 0) {
+ pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+ ret);
+ return ret;
+ }
+
dev = get_cpu_device(0);
if (unlikely(!dev)) {
pr_warn("%s: No cpu device for cpu0\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
- if (clamp == NULL)
- return -ENOMEM;
-
- ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
- if (ret < 0) {
- pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
- ret);
- goto free;
+ if (clamp == NULL) {
+ ret = -ENOMEM;
+ goto fail;
}
clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp";
ret = wf_register_control(clamp);
if (ret)
- goto fail;
+ goto free;
+
clamp_control = clamp;
return 0;
- fail:
- dev_pm_qos_remove_request(&qos_req);
free:
kfree(clamp);
+ fail:
+ freq_qos_remove_request(&qos_req);
return ret;
}
{
if (clamp_control) {
wf_unregister_control(clamp_control);
- dev_pm_qos_remove_request(&qos_req);
+ freq_qos_remove_request(&qos_req);
}
}
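
Besides switching from dev_pm_qos to the new freq_qos API, the hunk reorders the error labels so each one releases exactly what was acquired after the previous step. A compact sketch of that reverse-order unwind discipline, with illustrative resource names::

  #include <stdio.h>
  #include <stdlib.h>

  static int add_request(void)     { puts("qos request added");   return 0; }
  static void remove_request(void) { puts("qos request removed"); }

  int main(void)
  {
          char *clamp;
          int ret = add_request();     /* acquired first ... */

          if (ret)
                  return ret;

          clamp = malloc(16);          /* ... then the control */
          if (!clamp) {
                  ret = -1;
                  goto fail;
          }

          puts("registered");
          free(clamp);                 /* normal teardown */
          return 0;

  fail:                                /* ... so released last on error */
          remove_request();
          return ret;
  }
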
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
mt6397_irq_resume);
+struct chip_data {
+ u32 cid_addr;
+ u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+ .cid_addr = MT6323_CID,
+ .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+ .cid_addr = MT6397_CID,
+ .cid_shift = 0,
+};
+
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
unsigned int id;
struct mt6397_chip *pmic;
+ const struct chip_data *pmic_core;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
if (!pmic->regmap)
return -ENODEV;
- platform_set_drvdata(pdev, pmic);
+ pmic_core = of_device_get_match_data(&pdev->dev);
+ if (!pmic_core)
+ return -ENODEV;
- ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+ ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
if (ret) {
- dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
return ret;
}
+ pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+ platform_set_drvdata(pdev, pmic);
+
pmic->irq = platform_get_irq(pdev, 0);
if (pmic->irq <= 0)
return pmic->irq;
- switch (id & 0xff) {
- case MT6323_CHIP_ID:
- pmic->int_con[0] = MT6323_INT_CON0;
- pmic->int_con[1] = MT6323_INT_CON1;
- pmic->int_status[0] = MT6323_INT_STATUS0;
- pmic->int_status[1] = MT6323_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
+ ret = mt6397_irq_init(pmic);
+ if (ret)
+ return ret;
+ switch (pmic->chip_id) {
+ case MT6323_CHIP_ID:
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
ARRAY_SIZE(mt6323_devs), NULL,
0, pmic->irq_domain);
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
- pmic->int_con[0] = MT6397_INT_CON0;
- pmic->int_con[1] = MT6397_INT_CON1;
- pmic->int_status[0] = MT6397_INT_STATUS0;
- pmic->int_status[1] = MT6397_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
-
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
ARRAY_SIZE(mt6397_devs), NULL,
0, pmic->irq_domain);
break;
default:
- dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+ dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
return -ENODEV;
}
}
static const struct of_device_id mt6397_of_match[] = {
- { .compatible = "mediatek,mt6397" },
- { .compatible = "mediatek,mt6323" },
- { }
+ {
+ .compatible = "mediatek,mt6323",
+ .data = &mt6323_core,
+ }, {
+ .compatible = "mediatek,mt6397",
+ .data = &mt6397_core,
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mt6397_of_match);
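
The probe rework keys the register layout off OF match data rather than branching on a chip id read first. A sketch of the of_device_get_match_data() idea in plain C; the compatible strings are real, but the register values below are made up for illustration::

  #include <stdio.h>
  #include <string.h>

  struct chip_data { unsigned int cid_addr, cid_shift; };

  static const struct chip_data mt6323_core = { 0x0100, 0 };  /* illustrative */
  static const struct chip_data mt6397_core = { 0x0200, 0 };  /* illustrative */

  static const struct {
          const char *compatible;
          const struct chip_data *data;
  } of_match[] = {
          { "mediatek,mt6323", &mt6323_core },
          { "mediatek,mt6397", &mt6397_core },
          { NULL, NULL },              /* sentinel */
  };

  /* The compatible string selects a static per-chip descriptor, so
   * probe code reads layout from it instead of branching everywhere. */
  static const struct chip_data *get_match_data(const char *compatible)
  {
          for (int i = 0; of_match[i].compatible; i++)
                  if (!strcmp(of_match[i].compatible, compatible))
                          return of_match[i].data;
          return NULL;
  }

  int main(void)
  {
          const struct chip_data *core = get_match_data("mediatek,mt6397");

          if (!core)
                  return 1;
          printf("chip id register at 0x%04x\n", core->cid_addr);
          return 0;
  }
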
cq_host->slot[tag].flags = 0;
cq_host->qcnt += 1;
-
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
pr_debug("%s: cqhci: doorbell not set for tag %d\n",
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
host->data = data;
ssp->dma_dir = dma_data_dir;
ssp->slave_dirn = slave_dirn;
- desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
* on temperature
*/
if (temperature < -20000)
- phase_delay = min(max_window + 4 * max_len - 24,
+ phase_delay = min(max_window + 4 * (max_len - 1) - 24,
max_window +
DIV_ROUND_UP(13 * max_len, 16) * 4);
else if (temperature < 20000)
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
- bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ bond->dev->lower_level == upper->lower_level - 1) {
if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
goto err_upper_unlink;
}
- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
slave_disable_netpoll(new_slave);
err_close:
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_restore_mac:
if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
- bond->nest_level = SINGLE_DEPTH_NESTING;
- } else {
- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
}
unblock_netpoll_tx();
else
dev_set_mtu(slave_dev, slave->original_mtu);
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
bond_free_slave(slave);
}
}
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
-
- return bond->nest_level;
-}
-
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
struct list_head *iter;
struct slave *slave;
- spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+ spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
rcu_read_lock();
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
- .ndo_get_lock_subclass = bond_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
- spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
list_del(&bond->bond_list);
+ lockdep_unregister_key(&bond->stats_lock_key);
bond_debug_unregister(bond);
}
if (!bond->wq)
return -ENOMEM;
- bond->nest_level = SINGLE_DEPTH_NESTING;
- netdev_lockdep_set_classes(bond_dev);
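+	/* Register a per-bond lockdep key for stats_lock so that stacked
+	 * bonds fall into different lock classes without manual nest-level
+	 * bookkeeping.
+	 */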
+ spin_lock_init(&bond->stats_lock);
+ lockdep_register_key(&bond->stats_lock_key);
+ lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
list_add_tail(&bond->bond_list, &bn->dev_list);
unsigned int i;
u32 reg, offset;
- if (priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
- /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
- reg = core_readl(priv, CORE_IMP_CTL);
- reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
- reg &= ~(RX_DIS | TX_DIS);
- core_writel(priv, reg, CORE_IMP_CTL);
-
/* Enable forwarding */
core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
b53_brcm_hdr_setup(ds, port);
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- core_writel(priv, reg, offset);
+ if (port == 8) {
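+		/* Port 8 is the IMP (management/CPU) port on this switch. */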
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_IMP;
+ else
+ offset = CORE_STS_OVERRIDE_IMP2;
+
+ /* Force link status for IMP port */
+ reg = core_readl(priv, offset);
+ reg |= (MII_SW_OR | LINK_STS);
+ core_writel(priv, reg, offset);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+ reg = core_readl(priv, CORE_IMP_CTL);
+ reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_IMP_CTL);
+ } else {
+ reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+ }
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
config NET_DSA_SJA1105_TAS
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
- depends on NET_DSA_SJA1105
- depends on NET_SCH_TAPRIO
+ depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+ depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
help
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
if (priv->regulator)
regulator_disable(priv->regulator);
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
+
free_netdev(ndev);
return err;
}
{
bnxt_unmap_bars(bp, bp->pdev);
pci_release_regions(bp->pdev);
- pci_disable_device(bp->pdev);
+ if (pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
}
static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
}
/* fall through */
- case BNXT_FW_RESET_STATE_RESET_FW: {
- u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+ case BNXT_FW_RESET_STATE_RESET_FW:
bnxt_reset_all(bp);
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
- bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
- }
case BNXT_FW_RESET_STATE_ENABLE_DEV:
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
bp->fw_health) {
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
health_status = val & 0xffff;
- if (health_status == BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Healthy;");
- if (rc)
- return rc;
- } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Not yet completed initialization;");
+ if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+ "Not yet completed initialization");
if (rc)
return rc;
} else if (health_status > BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Encountered fatal error and cannot recover;");
+ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+ "Encountered fatal error and cannot recover");
if (rc)
return rc;
}
if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
if (rc)
return rc;
}
static const struct bnxt_dl_nvm_param nvm_params[] = {
{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
- NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
- NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
};
+union bnxt_nvm_data {
+ u8 val8;
+ __le32 val32;
+};
+
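+/* NVM encodes one-bit options as a single byte and wider options as
+ * little-endian 32-bit words, while devlink hands values over in a typed
+ * union; these helpers translate between the two representations.
+ */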
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+ union devlink_param_value *src,
+ int nvm_num_bits, int dl_num_bytes)
+{
+ u32 val32 = 0;
+
+ if (nvm_num_bits == 1) {
+ dst->val8 = src->vbool;
+ return;
+ }
+ if (dl_num_bytes == 4)
+ val32 = src->vu32;
+ else if (dl_num_bytes == 2)
+ val32 = (u32)src->vu16;
+ else if (dl_num_bytes == 1)
+ val32 = (u32)src->vu8;
+ dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+ union bnxt_nvm_data *src,
+ int nvm_num_bits, int dl_num_bytes)
+{
+ u32 val32;
+
+ if (nvm_num_bits == 1) {
+ dst->vbool = src->val8;
+ return;
+ }
+ val32 = le32_to_cpu(src->val32);
+ if (dl_num_bytes == 4)
+ dst->vu32 = val32;
+ else if (dl_num_bytes == 2)
+ dst->vu16 = (u16)val32;
+ else if (dl_num_bytes == 1)
+ dst->vu8 = (u8)val32;
+}
+
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- void *data_addr = NULL, *buf = NULL;
struct bnxt_dl_nvm_param nvm_param;
- int bytesize, idx = 0, rc, i;
+ union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
+ int idx = 0, rc, i;
/* Get/Set NVM CFG parameter is supported only on PFs */
if (BNXT_VF(bp))
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
- bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
- switch (bytesize) {
- case 1:
- if (nvm_param.num_bits == 1)
- buf = &val->vbool;
- else
- buf = &val->vu8;
- break;
- case 2:
- buf = &val->vu16;
- break;
- case 4:
- buf = &val->vu32;
- break;
- default:
- return -EFAULT;
- }
-
- data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
- &data_dma_addr, GFP_KERNEL);
- if (!data_addr)
+ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+ &data_dma_addr, GFP_KERNEL);
+ if (!data)
return -ENOMEM;
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
req->option_num = cpu_to_le16(nvm_param.offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- memcpy(data_addr, buf, bytesize);
+ bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+ nvm_param.dl_num_bytes);
rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bnxt_copy_from_nvm_data(val, data,
+ nvm_param.nvm_num_bits,
+ nvm_param.dl_num_bytes);
}
- if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
- memcpy(buf, data_addr, bytesize);
-
- dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return rc;
u16 id;
u16 offset;
u16 dir_type;
- u16 num_bits;
+ u16 nvm_num_bits;
+ u8 dl_num_bytes;
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
lld->write_cmpl_support = adap->params.write_cmpl_support;
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
{
- void *handle;
struct cxgb4_lld_info lli;
+ void *handle;
uld_init(adap, &lli);
uld_queue_init(adap, uld, &lli);
dev_warn(adap->pdev_dev,
"could not attach to the %s driver, error %ld\n",
adap->uld[uld].name, PTR_ERR(handle));
- return;
+ return PTR_ERR(handle);
}
adap->uld[uld].handle = handle;
if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+ return 0;
}
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
*
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type.
*/
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p)
{
- int ret = 0;
struct adapter *adap;
+ int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
if (ret)
goto free_irq;
adap->uld[type] = *p;
- uld_attach(adap, type);
+ ret = uld_attach(adap, type);
+ if (ret)
+ goto free_txq;
continue;
+free_txq:
+ release_sge_txq_uld(adap, type);
free_irq:
if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl((dbqt
- ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
- : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+ c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
- ? HOSTFCMODE_INGRESS_QUEUE_X
- : HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* Register definitions for Gemini GMAC Ethernet device driver
*
* Copyright (C) 2006 Storlink, Corp.
*/
nfrags = skb_shinfo(skb)->nr_frags;
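+	/* Do the checksum and VLAN work up front, before any DMA mapping:
+	 * ftgmac100_prep_tx_csum() may modify the skb data (e.g. via
+	 * skb_checksum_help()), which must not happen once the buffer has
+	 * been handed to the hardware.
+	 */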
+ /* Setup HW checksumming */
+ csum_vlan = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+ goto drop;
+
+ /* Add VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+ csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+ }
+
/* Get header len */
len = skb_headlen(skb);
if (nfrags == 0)
f_ctl_stat |= FTGMAC100_TXDES0_LTS;
txdes->txdes3 = cpu_to_le32(map);
-
- /* Setup HW checksumming */
- csum_vlan = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- !ftgmac100_prep_tx_csum(skb, &csum_vlan))
- goto drop;
-
- /* Add VLAN tag */
- if (skb_vlan_tag_present(skb)) {
- csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
- csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
- }
-
txdes->txdes1 = cpu_to_le32(csum_vlan);
/* Next descriptor */
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 NXP
*/
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
for (i = 0; i < irq_cnt; i++) {
snprintf(irq_name, sizeof(irq_name), "int%d", i);
- irq = platform_get_irq_byname(pdev, irq_name);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
if (irq < 0)
irq = platform_get_irq(pdev, i);
if (irq < 0) {
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- irq = platform_get_irq_byname(pdev, "pps");
+ irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
- irq = platform_get_irq(pdev, irq_idx);
+ irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal; only the PTP_CLOCK_PPS
	 * clock events will stop working.
	 */
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* gvnic can only receive into registered segments. If the buffer
* can't be recycled, our only choice is to copy the data out of
seg_desc->seg.seg_addr = cpu_to_be64(addr);
}
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
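+/* Flush the CPU's view of every page backing [iov_offset, iov_offset +
+ * iov_len) in the registered page list, so the device reads the bytes
+ * that were just copied into the FIFO.
+ */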
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+ u64 iov_offset, u64 iov_len)
+{
+ dma_addr_t dma;
+ u64 addr;
+
+ for (addr = iov_offset; addr < iov_offset + iov_len;
+ addr += PAGE_SIZE) {
+ dma = page_buses[addr / PAGE_SIZE];
+ dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+ }
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+ struct device *dev)
{
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
skb_copy_bits(skb, 0,
tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
hlen);
+ gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ info->iov[hdr_nfrags - 1].iov_offset,
+ info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
skb_copy_bits(skb, copy_offset,
tx->tx_fifo.base + info->iov[i].iov_offset,
info->iov[i].iov_len);
+ gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ info->iov[i].iov_offset,
+ info->iov[i].iov_len);
copy_offset += info->iov[i].iov_len;
}
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
- nsegs = gve_tx_add_skb(tx, skb);
+ nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
netdev_tx_sent_queue(tx->netdev_txq, skb->len);
skb_tx_timestamp(skb);
dma_addr_t rx_phys[RX_DESC_NUM];
unsigned int rx_head;
unsigned int rx_buf_size;
+ unsigned int rx_cnt_remaining;
struct device_node *phy_node;
struct phy_device *phy;
struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
struct net_device *ndev = priv->ndev;
struct net_device_stats *stats = &ndev->stats;
- unsigned int cnt = hip04_recv_cnt(priv);
struct rx_desc *desc;
struct sk_buff *skb;
unsigned char *buf;
/* clean up tx descriptors */
tx_remaining = hip04_tx_reclaim(ndev, false);
-
- while (cnt && !last) {
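+	/* Accumulate the hardware receive count; packets still pending when
+	 * the NAPI budget runs out are carried over to the next poll instead
+	 * of being forgotten.
+	 */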
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ while (priv->rx_cnt_remaining && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
if (unlikely(!skb)) {
hip04_set_recv_desc(priv, phys);
priv->rx_head = RX_NEXT(priv->rx_head);
- if (rx >= budget)
+ if (rx >= budget) {
+ --priv->rx_cnt_remaining;
goto done;
+ }
- if (--cnt == 0)
- cnt = hip04_recv_cnt(priv);
+ if (--priv->rx_cnt_remaining == 0)
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
}
if (!(priv->reg_inten & RCV_INT)) {
int i;
priv->rx_head = 0;
+ priv->rx_cnt_remaining = 0;
priv->tx_head = 0;
priv->tx_tail = 0;
hip04_reset_ppe(priv);
hip04_free_ring(ndev, d);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
for (i = 0; i < adapter->num_rx_queues; i++)
rxdr[i].count = rxdr->count;
+ err = 0;
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
err = e1000_setup_all_rx_resources(adapter);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
- if (err)
- goto err_setup;
}
kfree(tx_old);
kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
- return 0;
+ return err;
+
err_setup_tx:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
err_alloc_tx:
if (netif_running(adapter->netdev))
e1000_up(adapter);
-err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
-
- /* Kick start the NAPI context so that receiving will start */
- err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
- if (err)
- return err;
}
return 0;
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
- if (mac->type == e1000_82580) {
+ if (mac->type == e1000_82580 || mac->type == e1000_i350) {
switch (hw->device_id) {
/* feature not supported on these id's */
case E1000_DEV_ID_DH89XXCC_SGMII:
struct net_device *netdev = igb->netdev;
hw->hw_addr = NULL;
netdev_err(netdev, "PCIe link lost\n");
- WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
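+	/* A surprise-removed device legitimately reads back all-ones, so
+	 * only WARN when the PCI device is still present.
+	 */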
+ WARN(pci_device_is_present(igb->pdev),
+ "igb: Failed to read reg 0x%x!\n", reg);
}
return value;
if ((hw->phy.media_type == e1000_media_type_copper) &&
(!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
swap_now = true;
- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+ !(connsw & E1000_CONNSW_SERDESD)) {
/* copper signal takes time to appear */
if (adapter->copper_tries < 4) {
adapter->copper_tries++;
adapter->ei.get_invariants(hw);
adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
}
- if ((mac->type == e1000_82575) &&
+ if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
(adapter->flags & IGB_FLAG_MAS_ENABLE)) {
igb_enable_mas(adapter);
}
hw->hw_addr = NULL;
netif_device_detach(netdev);
netdev_err(netdev, "PCIe link lost, device now detached\n");
- WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+ WARN(pci_device_is_present(igc->pdev),
+ "igc: Failed to read reg 0x%x!\n", reg);
}
return value;
if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
- clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
continue;
(bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
}
#else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
- struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
- struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
- enum mvneta_bm_type type, u8 port_id,
- int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+ u8 pool_id,
+ enum mvneta_bm_type type,
+ u8 port_id,
+ int pkt_size)
+{ return NULL; }
static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ int vf)
{
- /* reduce the sink counter */
- return (dev->caps.max_counters - 1 -
- (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
- / MLX4_MAX_PORTS;
+ struct mlx4_active_ports actv_ports;
+ int ports, counters_guaranteed;
+
+ /* For master, only allocate according to the number of phys ports */
+ if (vf == mlx4_master_func_num(dev))
+ return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+ /* calculate real number of ports for the VF */
+ actv_ports = mlx4_get_active_ports(dev, vf);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+	/* If there are not enough counters left for this VF, do not
+	 * allocate any for it; the '-1' accounts for the sink counter.
+ */
+ if ((res_alloc->res_reserved + counters_guaranteed) >
+ (dev->caps.max_counters - 1))
+ return 0;
+
+ return counters_guaranteed;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
- int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kcalloc(dev->num_slaves, sizeof(struct slave_list),
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- if (t == mlx4_master_func_num(dev))
- res_alloc->guaranteed[t] =
- MLX4_PF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else if (t <= max_vfs_guarantee_counter)
- res_alloc->guaranteed[t] =
- MLX4_VF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else
- res_alloc->guaranteed[t] = 0;
+ res_alloc->guaranteed[t] =
+ mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
break;
default:
break;
u8 num_wqebbs;
u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
- skb_frag_t *resync_dump_frag;
+ struct page *resync_dump_frag_page;
#endif
};
struct device *pdev;
__be32 mkey_be;
unsigned long state;
+ unsigned int hw_mtu;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
"Failed to create hv vhca stats agent, err = %ld\n",
PTR_ERR(agent));
- kfree(priv->stats_agent.buf);
+ kvfree(priv->stats_agent.buf);
return IS_ERR_OR_NULL(agent);
}
return;
mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
- kfree(priv->stats_agent.buf);
+ kvfree(priv->stats_agent.buf);
}
if (ret)
return ret;
- if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+ if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+ ip_rt_put(rt);
return -ENETUNREACH;
+ }
#else
return -EOPNOTSUPP;
#endif
ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
- if (ret < 0)
+ if (ret < 0) {
+ ip_rt_put(rt);
return ret;
+ }
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
*out_ttl = ip6_dst_hoplimit(dst);
ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0)
+ if (ret < 0) {
+ dst_release(dst);
return ret;
+ }
#else
return -EOPNOTSUPP;
#endif
#else
/* TLS offload requires additional stop_room for:
* - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-dependent room for the resync DUMP WQEs
+ * will be calculated and added at runtime.
*/
#define MLX5E_SQ_TLS_ROOM \
(MLX5_SEND_WQE_MAX_WQEBBS + \
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
- MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
#endif
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
- wi->skb = NULL;
+ memset(wi, 0, sizeof(*wi));
wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
return -ENOMEM;
tx_priv->expected_seq = start_offload_tcp_sn;
- tx_priv->crypto_info = crypto_info;
+ tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
/* tc and underlay_qpn values are not in use for tls tis */
MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
- struct tls_crypto_info *crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
u32 expected_seq;
u32 tisn;
u32 key_id;
struct mlx5e_tx_wqe **wqe, u16 *pi);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
- struct mlx5e_sq_dma *dma);
-
+ u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+ unsigned int sync_len)
+{
+ /* Given the MTU and sync_len, calculates an upper bound for the
+ * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+ */
+ return MLX5E_KTLS_DUMP_WQEBBS *
+ (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc) {}
+
#endif
#endif /* __MLX5E_TLS_H__ */
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
- struct tls12_crypto_info_aes_gcm_128 *info;
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
char *initial_rn, *gcm_iv;
u16 salt_sz, rec_seq_sz;
char *salt, *rec_seq;
u8 tls_version;
- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
- return;
-
- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
EXTRACT_INFO_FIELDS;
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
- u16 pi, u8 num_wqebbs,
- skb_frag_t *resync_dump_frag,
- u32 num_bytes)
+ u16 pi, u8 num_wqebbs, u32 num_bytes,
+ struct page *page)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- wi->skb = NULL;
- wi->num_wqebbs = num_wqebbs;
- wi->resync_dump_frag = resync_dump_frag;
- wi->num_bytes = num_bytes;
+ memset(wi, 0, sizeof(*wi));
+ wi->num_wqebbs = num_wqebbs;
+ wi->num_bytes = num_bytes;
+ wi->resync_dump_frag_page = page;
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 contig_wqebbs_room, pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room <
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
u64 rcd_sn;
s32 sync_len;
int nr_frags;
- skb_frag_t *frags[MAX_SKB_FRAGS];
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+ MLX5E_KTLS_SYNC_DONE,
+ MLX5E_KTLS_SYNC_FAIL,
+ MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ u32 tcp_seq, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+ enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
- bool ret = true;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
if (unlikely(!record)) {
- ret = false;
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- if (!tls_record_is_start_marker(record))
- ret = false;
+ ret = tls_record_is_start_marker(record) ?
+ MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
goto out;
}
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
- __skb_frag_ref(frag);
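+		/* Hold a page reference on each fragment; it is dropped when
+		 * the corresponding DUMP WQE completes, or on the error path.
+		 */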
+ get_page(skb_frag_page(frag));
remaining -= skb_frag_size(frag);
- info->frags[i++] = frag;
+ info->frags[i++] = *frag;
}
/* reduce the part which will be sent with the original SKB */
if (remaining < 0)
- skb_frag_size_add(info->frags[i - 1], remaining);
+ skb_frag_size_add(&info->frags[i - 1], remaining);
info->nr_frags = i;
out:
spin_unlock_irqrestore(&tx_ctx->lock, flags);
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
- struct tls12_crypto_info_aes_gcm_128 *info;
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
- return;
-
- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
rec_seq = info->rec_seq;
rec_seq_sz = sizeof(info->rec_seq);
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
-struct mlx5e_dump_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_data_seg data;
-};
-
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_dump_wqe *wqe;
dma_addr_t dma_addr = 0;
- u8 num_wqebbs;
u16 ds_cnt;
int fsz;
u16 pi;
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
cseg = &wqe->ctrl;
dseg = &wqe->data;
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
- sq->pc += num_wqebbs;
-
- WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
- "unexpected DUMP num_wqebbs, %d > %d",
- num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+ sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
return 0;
}
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
- struct mlx5e_sq_dma *dma)
+ u32 *dma_fifo_cc)
{
- struct mlx5e_sq_stats *stats = sq->stats;
+ struct mlx5e_sq_stats *stats;
+ struct mlx5e_sq_dma *dma;
+
+ if (!wi->resync_dump_frag_page)
+ return;
+
+ dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+ stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
- __skb_frag_unref(wi->resync_dump_frag);
+ put_page(wi->resync_dump_frag_page);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- tx_fill_wi(sq, pi, 1, NULL, 0);
+ tx_fill_wi(sq, pi, 1, 0, NULL);
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
+ int datalen,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
+ enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
u16 contig_wqebbs_room, pi;
u8 num_wqebbs;
- int i;
-
- if (!tx_sync_info_get(priv_tx, seq, &info)) {
+ int i = 0;
+
+ ret = tx_sync_info_get(priv_tx, seq, &info);
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+ if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+ stats->tls_skip_no_sync_data++;
+ return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+ }
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
}
if (unlikely(info.sync_len < 0)) {
- u32 payload;
- int headln;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- return skb;
+ if (likely(datalen <= -info.sync_len))
+ return MLX5E_KTLS_SYNC_DONE;
stats->tls_drop_bypass_req++;
goto err_out;
stats->tls_ooo++;
- num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+ /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+ * actual data xmit.
+ */
+ if (!info.nr_frags) {
+ tx_post_fence_nop(sq);
+ return MLX5E_KTLS_SYNC_DONE;
+ }
+
+ num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
-	tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- for (i = 0; i < info.nr_frags; i++)
- if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
- goto err_out;
+ for (; i < info.nr_frags; i++) {
+ unsigned int orig_fsz, frag_offset = 0, n = 0;
+ skb_frag_t *f = &info.frags[i];
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags)
- tx_post_fence_nop(sq);
+ orig_fsz = skb_frag_size(f);
- return skb;
+ do {
+ bool fence = !(i || frag_offset);
+ unsigned int fsz;
+
+ n++;
+ fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+ skb_frag_size_set(f, fsz);
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ page_ref_add(skb_frag_page(f), n - 1);
+ goto err_out;
+ }
+
+ skb_frag_off_add(f, fsz);
+ frag_offset += fsz;
+ } while (frag_offset < orig_fsz);
+
+ page_ref_add(skb_frag_page(f), n - 1);
+ }
+
+ return MLX5E_KTLS_SYNC_DONE;
err_out:
- dev_kfree_skb_any(skb);
- return NULL;
+ for (; i < info.nr_frags; i++)
+ /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+ * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+ * released only upon their completions (or in mlx5e_free_txqsq_descs,
+ * if channel closes).
+ */
+ put_page(skb_frag_page(&info.frags[i]));
+
+ return MLX5E_KTLS_SYNC_FAIL;
}
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
- skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
- if (unlikely(!skb))
+ enum mlx5e_ktls_sync_retval ret =
+ mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+ if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+ else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ goto err_out;
+ else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
goto out;
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
}
priv_tx->expected_seq = seq + datalen;
{
#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
- sq->stop_room += MLX5E_SQ_TLS_ROOM;
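+		/* Also reserve worst-case room for the resync DUMP WQEs of a
+		 * single TLS record: one DUMP per fragment plus one per MTU
+		 * of payload.
+		 */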
+ sq->stop_room += MLX5E_SQ_TLS_ROOM +
+ mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+ TLS_MAX_PAYLOAD_SIZE);
}
+#endif
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl);
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- sq->db.wqe_info[pi].skb = NULL;
+ wi = &sq->db.wqe_info[pi];
+
+ memset(wi, 0, sizeof(*wi));
+ wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (rq->cqd.left)
+ if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
+ if (rq->cqd.left || work_done >= budget)
+ goto out;
+ }
cqe = mlx5_cqwq_get_cqe(cqwq);
if (!cqe) {
#include <linux/udp.h>
#include <net/udp.h>
#include "en.h"
+#include "en/port.h"
enum {
MLX5E_ST_LINK_STATE,
static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- u32 eth_proto_oper;
- int i;
+ u32 speed;
if (!netif_carrier_ok(priv->netdev))
return 1;
- if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
- return 1;
-
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
- if (eth_proto_oper & MLX5E_PROT_MASK(i))
- return 0;
- }
- return 1;
+ return mlx5e_port_linkspeed(priv->mdev, &speed);
}
struct mlx5ehdr {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
s->tx_tls_ctx += sq_stats->tls_ctx;
s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
- s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
- s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
#endif
s->tx_cqes += sq_stats->cqes;
}
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
u64 tx_tls_encrypted_bytes;
u64 tx_tls_ctx;
u64 tx_tls_ooo;
+ u64 tx_tls_dump_packets;
+ u64 tx_tls_dump_bytes;
u64 tx_tls_resync_bytes;
+ u64 tx_tls_skip_no_sync_data;
u64 tx_tls_drop_no_sync_data;
u64 tx_tls_drop_bypass_req;
- u64 tx_tls_dump_packets;
- u64 tx_tls_dump_bytes;
#endif
u64 rx_xsk_packets;
u64 tls_encrypted_bytes;
u64 tls_ctx;
u64 tls_ooo;
+ u64 tls_dump_packets;
+ u64 tls_dump_bytes;
u64 tls_resync_bytes;
+ u64 tls_skip_no_sync_data;
u64 tls_drop_no_sync_data;
u64 tls_drop_bypass_req;
- u64 tls_dump_packets;
- u64 tls_dump_bytes;
#endif
/* less likely accessed in data path */
u64 csum_none;
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
kvfree(attr->parse_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
+ kfree(e->tun_info);
kfree(e->encap_header);
kfree_rcu(e, rcu);
}
return NULL;
}
+static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+ size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+ return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
refcount_set(&e->refcnt, 1);
init_completion(&e->res_ready);
+ tun_info = dup_tun_info(tun_info);
+ if (!tun_info) {
+ err = -ENOMEM;
+ goto out_err_init;
+ }
e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err) {
- kfree(e);
- e = NULL;
- goto out_err;
- }
+ if (err)
+ goto out_err_init;
INIT_LIST_HEAD(&e->flows);
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
if (e)
mlx5e_encap_put(priv, e);
return err;
+
+out_err_init:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ kfree(tun_info);
+ kfree(e);
+ return err;
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
- int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+ int nest_level = attr->parse_attr->filter_dev->lower_level;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
} else if (encap) {
parse_attr->mirred_ifindex[attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = info;
+ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[attr->out_count])
+ return -ENOMEM;
encap = false;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
struct mlx5_err_cqe *err_cqe)
{
- u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+ struct mlx5_cqwq *wq = &sq->cq.wq;
+ u32 ci;
+
+ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
netdev_err(sq->channel->netdev,
"Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
skb = wi->skb;
if (unlikely(!skb)) {
-#ifdef CONFIG_MLX5_EN_TLS
- if (wi->resync_dump_frag) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
-
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
- }
-#endif
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
sqcc += wi->num_wqebbs;
continue;
}
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
+ u32 dma_fifo_cc;
+ u16 sqcc;
u16 ci;
int i;
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ sqcc = sq->cc;
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ while (sqcc != sq->pc) {
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
- if (!skb) { /* nop */
- sq->cc++;
+ if (!skb) {
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+ sqcc += wi->num_wqebbs;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+ mlx5e_dma_get(sq, dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
- sq->cc += wi->num_wqebbs;
+ sqcc += wi->num_wqebbs;
}
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
}
#ifdef CONFIG_MLX5_CORE_IPOIB
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
+static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+ const struct mlx5_flow_spec *spec)
+{
+ u32 port_mask, port_value;
+
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+ return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
+
+ port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+ misc_parameters.source_port);
+ port_value = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.source_port);
+ return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+}
+
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
- u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
- misc_parameters.source_port);
- u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.source_port);
-
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
return false;
/* push vlan on RX */
return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
- ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+ mlx5_eswitch_offload_is_uplink_port(esw, spec);
}
struct mlx5_flow_handle *
}
err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
- if (err)
+ if (err) {
+ kvfree(in);
goto err_cqwq;
+ }
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
- if (extended_dest) {
+ if (extended_dest &&
+ dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
packet_reformat,
!!(dst->dest_attr.vport.flags &
return -ENOMEM;
err = mlx5_crdump_collect(dev, cr_data);
if (err)
- return err;
+ goto free_data;
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
if (err)
goto err_thermal_init;
- if (mlxsw_driver->params_register && !reload)
+ if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
return 0;
return;
}
- if (mlxsw_core->driver->params_unregister && !reload)
+ if (mlxsw_core->driver->params_unregister)
devlink_params_unpublish(devlink);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
port->pvid = vid;
	/* Untagged egress vlan classification */
- if (untagged)
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
port->vid = vid;
+ }
ocelot_vlan_port_apply(ocelot, port);
static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- return ocelot_vlan_vid_add(dev, vid, false, true);
+ return ocelot_vlan_vid_add(dev, vid, false, false);
}
static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
nfp_port_free(repr->port);
}
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
- nfp_repr_set_lockdep_class(netdev);
-
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#define QED_RDMA_SRQS QED_ROCE_QPS
-#define QED_NVM_CFG_SET_FLAGS 0xE
-#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
#define QED_NVM_CFG_GET_FLAGS 0xA
#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
+#define QED_NVM_CFG_MAX_ATTRS 50
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 entity_id, len, buf[32];
+ bool need_nvm_init = true;
struct qed_ptt *ptt;
u16 cfg_id, count;
int rc = 0, i;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
"Read config ids: num_attrs = %0d\n", count);
- /* NVM CFG ID attributes */
- for (i = 0; i < count; i++) {
+ /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+ * arithmetic operations in the implementation.
+ */
+ for (i = 1; i <= count; i++) {
cfg_id = *((u16 *)*data);
*data += 2;
entity_id = **data;
memcpy(buf, *data, len);
*data += len;
- flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
- QED_NVM_CFG_SET_FLAGS;
+ flags = 0;
+ if (need_nvm_init) {
+ flags |= QED_NVM_CFG_OPTION_INIT;
+ need_nvm_init = false;
+ }
+
+ /* Commit to flash and free the resources */
+ if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+ flags |= QED_NVM_CFG_OPTION_COMMIT |
+ QED_NVM_CFG_OPTION_FREE;
+ need_nvm_init = true;
+ }
+
+ if (entity_id)
+ flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
"cfg_id = %d entity = %d len = %d\n", cfg_id,
(qed_iov_validate_active_txq(p_hwfn, vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn,
- "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+ "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
vf->abs_vf_id);
status = PFVF_STATUS_MALICIOUS;
goto out;
{
int value;
+ /* Work around issue with chip reporting wrong PHY ID */
+ if (reg == MII_PHYSID2)
+ return 0xc912;
+
r8168dp_2_mdio_start(tp);
value = r8169_mdio_read(tp, reg);
} else {
stmmac_set_desc_addr(priv, first, des);
tmp_pay_len = pay_len;
+ des += proto_hdr_len;
}
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
adapter->open_guard = false;
adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
err = fjes_hw_init(&adapter->hw);
if (err)
- goto err_free_netdev;
+ goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
netdev->dev_addr[0] = 2;
err_hw_exit:
fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
free_netdev(netdev);
err_out:
static LIST_HEAD(bpq_devices);
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
/* ------------------------------------------------------------------------ */
err = register_netdevice(ndev);
if (err)
goto error;
- bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
- return ret;
+ goto err;
rdev = nvdev->extension;
if (!rdev->link_state)
}
return 0;
+
+err:
+ netif_device_detach(ndev);
+
+ rndis_filter_device_remove(hdev, nvdev);
+
+ return ret;
}
static int netvsc_set_channels(struct net_device *net,
ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
- if (ret)
+ if (ret) {
features ^= NETIF_F_LRO;
+ ndev->features = features;
+ }
syncvf:
if (!vf_netdev)
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
- netdev_lockdep_set_classes(net);
-
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
- netdev_lockdep_set_classes(dev);
-
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
- unsigned int nest_level;
};
/**
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-static struct lock_class_key macsec_netdev_addr_lock_key;
static int macsec_dev_init(struct net_device *dev)
{
return macsec_priv(dev)->real_dev->ifindex;
}
-static int macsec_get_nest_level(struct net_device *dev)
-{
- return macsec_priv(dev)->nest_level;
-}
-
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
- .ndo_get_lock_subclass = macsec_get_nest_level,
};
static const struct device_type macsec_type = {
static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
- struct net_device *real_dev = macsec->real_dev;
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
- dev_put(real_dev);
}
static void macsec_setup(struct net_device *dev)
if (err < 0)
return err;
- dev_hold(real_dev);
-
- macsec->nest_level = dev_get_nest_level(real_dev) + 1;
- netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macsec_netdev_addr_lock_key,
- macsec_get_nest_level(dev));
-
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto unregister;
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
-static struct lock_class_key macvlan_netdev_addr_lock_key;
-
#define ALWAYS_ON_OFFLOADS \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
-static int macvlan_get_nest_level(struct net_device *dev)
-{
- return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
-}
-
-static void macvlan_set_lockdep_class(struct net_device *dev)
-{
- netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key,
- macvlan_get_nest_level(dev));
-}
-
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
dev->gso_max_segs = lowerdev->gso_max_segs;
dev->hard_header_len = lowerdev->hard_header_len;
- macvlan_set_lockdep_class(dev);
-
vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
- .ndo_get_lock_subclass = macvlan_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macvlan_dev_poll_controller,
.ndo_netpoll_setup = macvlan_dev_netpoll_setup,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
- vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
{
struct nsim_dev_port *nsim_dev_port, *tmp;
+ mutex_lock(&nsim_dev->port_list_lock);
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev;
+ int i;
+ int err;
nsim_dev = nsim_dev_create(nsim_bus_dev);
if (IS_ERR(nsim_dev))
return PTR_ERR(nsim_dev);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+ mutex_lock(&nsim_dev->port_list_lock);
+ for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
+ mutex_unlock(&nsim_dev->port_list_lock);
return 0;
+
+err_port_del_all:
+ mutex_unlock(&nsim_dev->port_list_lock);
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+ return err;
}
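
The netdevsim hunk above wraps port creation in port_list_lock and unwinds through a goto label on failure, dropping the lock before calling the teardown helper that takes it itself. A minimal user-space sketch of that lock-then-unwind shape, with a hypothetical port_add() that fails partway through and pthreads standing in for the kernel mutex::

    #include <pthread.h>
    #include <stdio.h>

    #define PORTS 4
    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ports[PORTS];

    static int port_add(int i)
    {
            if (i == 2)             /* simulate a failure on the third port */
                    return -1;
            ports[i] = 1;
            return 0;
    }

    static void port_del_all(void)
    {
            pthread_mutex_lock(&port_lock);
            for (int i = 0; i < PORTS; i++)
                    ports[i] = 0;
            pthread_mutex_unlock(&port_lock);
    }

    static int probe(void)
    {
            int i, err;

            pthread_mutex_lock(&port_lock);
            for (i = 0; i < PORTS; i++) {
                    err = port_add(i);
                    if (err)
                            goto err_unwind;
            }
            pthread_mutex_unlock(&port_lock);
            return 0;

    err_unwind:
            /* Drop the lock first: the teardown helper takes it itself. */
            pthread_mutex_unlock(&port_lock);
            port_del_all();
            return err;
    }

    int main(void)
    {
            printf("probe() = %d\n", probe());
            return 0;
    }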
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
#define phylink_info(pl, fmt, ...) \
phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define phylink_dbg(pl, fmt, ...) \
+do { \
+ if ((pl)->config->type == PHYLINK_NETDEV) \
+ netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \
+ else if ((pl)->config->type == PHYLINK_DEV) \
+ dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define phylink_dbg(pl, fmt, ...) \
phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+#else
+#define phylink_dbg(pl, fmt, ...) \
+({ \
+ if (0) \
+ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \
+})
+#endif
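
The phylink_dbg change above selects one of three implementations at build time; the fallback variant keeps the arguments type-checked while compiling away entirely. A minimal sketch of that pattern, with MY_DYNAMIC_DEBUG and MY_DEBUG as hypothetical stand-ins for CONFIG_DYNAMIC_DEBUG and DEBUG (GNU C, as the kernel itself requires for the statement expression)::

    #include <stdio.h>

    #if defined(MY_DYNAMIC_DEBUG)
    #define my_dbg(fmt, ...) fprintf(stderr, "dyn: " fmt, ##__VA_ARGS__)
    #elif defined(MY_DEBUG)
    #define my_dbg(fmt, ...) fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
    #else
    /* Arguments stay type-checked, but the call compiles to nothing. */
    #define my_dbg(fmt, ...)                                \
    ({                                                      \
            if (0)                                          \
                    fprintf(stderr, fmt, ##__VA_ARGS__);    \
    })
    #endif

    int main(void)
    {
            my_dbg("value=%d\n", 42);
            return 0;
    }

Building with -DMY_DYNAMIC_DEBUG or -DMY_DEBUG switches the behaviour, and a format-string mistake is diagnosed even in the disabled build.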
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
.name = "SMSC LAN8740",
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
{
struct ppp *ppp;
- netdev_lockdep_set_classes(dev);
-
ppp = netdev_priv(dev);
/* Let the netdevice take a reference on the ppp file. This ensures
* that ppp_destroy_interface() won't run before the device gets
int err;
team->dev = dev;
- mutex_init(&team->lock);
team_set_no_mode(team);
team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
goto err_options_register;
netif_carrier_off(dev);
- netdev_lockdep_set_classes(dev);
+ lockdep_register_key(&team->team_lock_key);
+ __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
return 0;
team_queue_override_fini(team);
mutex_unlock(&team->lock);
netdev_change_features(dev);
+ lockdep_unregister_key(&team->team_lock_key);
}
static void team_destructor(struct net_device *dev)
err = team_port_del(team, port_dev);
mutex_unlock(&team->lock);
- if (!err)
- netdev_change_features(dev);
+ if (err)
+ return err;
+
+ if (netif_is_team_master(port_dev)) {
+ lockdep_unregister_key(&team->team_lock_key);
+ lockdep_register_key(&team->team_lock_key);
+ lockdep_set_class(&team->lock, &team->team_lock_key);
+ }
+ netdev_change_features(dev);
return err;
}
.driver_info = 0,
},
+/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
- if (dev->domain_data.phyirq > 0)
+ if (dev->domain_data.phyirq > 0) {
+ local_irq_disable();
generic_handle_irq(dev->domain_data.phyirq);
+ local_irq_enable();
+ }
} else
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
- netdev_lockdep_set_classes(dev);
return 0;
out_rth:
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->options_len &&
- info->key.tun_flags & TUNNEL_VXLAN_OPT)
+ if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ if (info->options_len < sizeof(*md))
+ goto drop;
md = ip_tunnel_info_opts(info);
+ }
ttl = info->key.ttl;
tos = info->key.tos;
label = info->key.label;
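
The vxlan hunk above stops trusting options_len blindly: the option metadata is only dereferenced once the advertised length is known to cover the structure, and undersized packets are dropped. A small sketch of that validate-before-use idiom, with opt_md as a hypothetical metadata layout::

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct opt_md {                 /* hypothetical option metadata layout */
            uint32_t gbp;
    };

    /* Only hand out a pointer to the options if the advertised length
     * can actually hold the structure we are about to read. */
    static const struct opt_md *get_opts(const void *buf, size_t options_len)
    {
            if (options_len < sizeof(struct opt_md))
                    return NULL;    /* too short: caller drops the packet */
            return buf;
    }

    int main(void)
    {
            uint32_t raw[2] = { 0 };

            printf("len 2: %s\n", get_opts(raw, 2) ? "ok" : "dropped");
            printf("len 8: %s\n", get_opts(raw, sizeof(raw)) ? "ok" : "dropped");
            return 0;
    }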
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net_device *remote_dev = NULL;
struct vxlan_fdb *f = NULL;
bool unregister = false;
+ struct vxlan_rdst *dst;
int err;
+ dst = &vxlan->default_dst;
err = vxlan_dev_configure(net, dev, conf, false, extack);
if (err)
return err;
dev->ethtool_ops = &vxlan_ethtool_ops;
/* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ if (!vxlan_addr_any(&dst->remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
+ &dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
NTF_SELF, &f);
if (err)
return err;
goto errout;
unregister = true;
+ if (dst->remote_ifindex) {
+ remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+ if (!remote_dev)
+ goto errout;
+
+ err = netdev_upper_dev_link(remote_dev, dev, extack);
+ if (err)
+ goto errout;
+ }
+
err = rtnl_configure_link(dev, NULL);
if (err)
- goto errout;
+ goto unlink;
if (f) {
- vxlan_fdb_insert(vxlan, all_zeros_mac,
- vxlan->default_dst.remote_vni, f);
+ vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
/* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
if (err) {
vxlan_fdb_destroy(vxlan, f, false, false);
+ if (remote_dev)
+ netdev_upper_dev_unlink(remote_dev, dev);
goto unregister;
}
}
list_add(&vxlan->next, &vn->vxlan_list);
+ if (remote_dev)
+ dst->remote_dev = remote_dev;
return 0;
-
+unlink:
+ if (remote_dev)
+ netdev_upper_dev_unlink(remote_dev, dev);
errout:
/* unregister_netdevice() destroys the default FDB entry with deletion
* notification. But the addition notification was not sent yet, so
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev;
struct vxlan_config conf;
+ struct vxlan_rdst *dst;
int err;
+ dst = &vxlan->default_dst;
err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
if (err)
return err;
if (err)
return err;
+ if (dst->remote_dev == lowerdev)
+ lowerdev = NULL;
+
+ err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+ extack);
+ if (err)
+ return err;
+
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
NTF_SELF, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ netdev_adjacent_change_abort(dst->remote_dev,
+ lowerdev, dev);
return err;
}
}
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
+ netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+ if (lowerdev && lowerdev != dst->remote_dev) {
+ dst->remote_dev = lowerdev;
+ netdev_update_lockdep_key(lowerdev);
+ }
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
return 0;
}
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
+ if (vxlan->default_dst.remote_dev)
+ netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
}
static size_t vxlan_get_size(const struct net_device *dev)
"%d\n", result);
result = 0;
error_cmd:
- kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:
d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
wimax_dev, state, result);
+ kfree(cmd);
return result;
}
} __packed;
/**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+ u8 enable_cam_mode;
+ u8 enable_promiscouos_mode;
+ u8 bcast_sta_id;
+ u8 reserved;
+ __le32 tx_chains;
+ __le32 rx_chains;
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
/**
* STA_CONTEXT_DOT11AX_API_S
* @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
* version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
*/
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
#define PERSISTENCE_BIT BIT(12)
#define PREG_WFPM_ACCESS BIT(12)
+#define HPM_HIPM_GEN_CFG 0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE BIT(10)
+
#define UREG_DOORBELL_TO_ISR6 0xA05C04
#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
#define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
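
iwl_mvm_is_reduced_config_scan_supported() above is a thin wrapper over a firmware capability bitmap. A self-contained sketch of such a bitmap test, with the bit index 56 mirroring the TLV value added above; the struct and helper names here are illustrative, not the driver's::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define API_REDUCED_SCAN_CONFIG 56      /* bit index, as in the TLV above */

    struct fw_caps {
            uint64_t api[2];                /* 128 API bits */
    };

    static bool fw_has_api(const struct fw_caps *c, unsigned int bit)
    {
            return c->api[bit / 64] & (1ULL << (bit % 64));
    }

    int main(void)
    {
            struct fw_caps caps = { .api = { 0, 0 } };

            caps.api[API_REDUCED_SCAN_CONFIG / 64] |=
                    1ULL << (API_REDUCED_SCAN_CONFIG % 64);

            printf("reduced scan config supported: %d\n",
                   fw_has_api(&caps, API_REDUCED_SCAN_CONFIG));
            return 0;
    }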
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags,
- u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
- struct iwl_scan_config *cfg = config;
+ struct iwl_scan_config_v2 *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
{
void *cfg;
int ret, cmd_size;
}
if (iwl_mvm_cdb_scan_api(mvm))
- cmd_size = sizeof(struct iwl_scan_config);
+ cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += num_channels;
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
- num_channels);
+ iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+ num_channels);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
num_channels);
return ret;
}
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_config cfg;
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .len[0] = sizeof(cfg),
+ .data[0] = &cfg,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+ return iwl_mvm_legacy_config_scan(mvm);
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
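
The new iwl_mvm_config_scan() above fills a small payload struct and points a command descriptor at it rather than allocating a variable-sized buffer. A sketch of that descriptor-plus-payload shape; the command id and transport function here are made up for illustration::

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct host_cmd {                       /* simplified command descriptor */
            uint32_t id;
            const void *data;
            size_t len;
    };

    static int send_cmd(const struct host_cmd *cmd)
    {
            /* stand-in for the transport layer: just report the payload */
            printf("cmd 0x%x, %zu bytes\n", (unsigned int)cmd->id, cmd->len);
            return 0;
    }

    struct scan_config {
            uint8_t enable_cam_mode;
            uint8_t bcast_sta_id;
            uint32_t tx_chains;
    };

    static int config_scan(void)
    {
            struct scan_config cfg;
            struct host_cmd cmd = {
                    .id = 0x1d,             /* hypothetical command id */
                    .data = &cfg,
                    .len = sizeof(cfg),
            };

            memset(&cfg, 0, sizeof(cfg));
            cfg.bcast_sta_id = 1;
            return send_cmd(&cmd);
    }

    int main(void)
    {
            return config_scan();
    }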
static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
{
int i;
mvm_sta->sta_id, i);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
i, wdg);
+ /*
+ * On failure, just set it to IWL_MVM_INVALID_QUEUE
+ * so we can try again later; we have no other good
+ * way of failing here.
+ */
+ if (txq_id < 0)
+ txq_id = IWL_MVM_INVALID_QUEUE;
tid_data->txq_id = txq_id;
/*
sta->sta_id = IWL_MVM_INVALID_STA;
}
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->trans->trans_cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .sta_id = sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->trans->trans_cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+ wdg_timeout);
+}
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+ int maccolor,
+ struct iwl_mvm_int_sta *sta,
+ u16 *queue, int fifo)
+{
+ int ret;
+
+ /* Map queue to fifo - needs to happen before adding station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+ if (ret) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_disable_txq(mvm, NULL, *queue,
+ IWL_MAX_TID_COUNT, 0);
+ return ret;
+ }
+
+ /*
+ * For 22000 firmware and onward we cannot add a queue to a station
+ * unknown to the firmware, so enable the queue here - after the
+ * station was added
+ */
if (iwl_mvm_has_new_tx_api(mvm)) {
- int tvqm_queue =
- iwl_mvm_tvqm_enable_txq(mvm, sta_id,
- IWL_MAX_TID_COUNT,
- wdg_timeout);
- *queue = tvqm_queue;
- } else {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = fifo,
- .sta_id = sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
+ int txq;
- iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+ txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+ if (txq < 0) {
+ iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+ return txq;
+ }
+
+ *queue = txq;
}
+
+ return 0;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
- /* Map Aux queue to fifo - needs to happen before adding Aux station */
- if (!iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
- mvm->aux_sta.sta_id,
- IWL_MVM_TX_FIFO_MCAST);
-
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
- MAC_INDEX_AUX, 0);
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+ &mvm->aux_sta, &mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST);
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
return ret;
}
- /*
- * For 22000 firmware and on we cannot add queue to a station unknown
- * to firmware so enable queue here - after the station was added
- */
- if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
- mvm->aux_sta.sta_id,
- IWL_MVM_TX_FIFO_MCAST);
-
return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret;
lockdep_assert_held(&mvm->mutex);
- /* Map snif queue to fifo - must happen before adding snif station */
- if (!iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
- mvm->snif_sta.sta_id,
+ return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+ &mvm->snif_sta, &mvm->snif_queue,
IWL_MVM_TX_FIFO_BE);
-
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
- mvmvif->id, 0);
- if (ret)
- return ret;
-
- /*
- * For 22000 firmware and on we cannot add queue to a station unknown
- * to firmware so enable queue here - after the station was added
- */
- if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
- mvm->snif_sta.sta_id,
- IWL_MVM_TX_FIFO_BE);
-
- return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
+ if (queue < 0) {
+ iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ return queue;
+ }
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
}
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
- if (ret) {
- iwl_mvm_dealloc_int_sta(mvm, msta);
- return ret;
- }
+ if (ret)
+ goto err;
/*
* Enable cab queue after the ADD_STA command is sent.
int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
0,
timeout);
+ if (queue < 0) {
+ ret = queue;
+ goto err;
+ }
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
timeout);
return 0;
+err:
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
-
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- ((cfg != &iwl_ax200_cfg_cc &&
- cfg != &killer1650x_2ax_cfg &&
- cfg != &killer1650w_2ax_cfg &&
- cfg != &iwl_ax201_cfg_quz_hr) ||
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
u32 hw_status;
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
#include "internal.h"
#include "fw/dbg.h"
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ udelay(20);
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_PG_EN |
+ HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ udelay(20);
+ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+ iwl_trans_sw_reset(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ return 0;
+}
+
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
iwl_pcie_apm_config(trans);
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->cfg->integrated) {
+ ret = iwl_pcie_gen2_force_power_gating(trans);
+ if (ret)
+ return ret;
+ }
+
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
}
}
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &hostap_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
struct device *sdev)
if (ret >= 0)
ret = register_netdevice(dev);
- prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
tx.o agg-rx.o mcu.o
+mt76-$(CONFIG_PCI) += pci.o
+
mt76-usb-y := usb.o usb_trace.o
CFLAGS_trace.o := -I$(src)
u32 ctrl;
int i, idx = -1;
- if (txwi)
+ if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
+ q->entry[q->head].skip_buf0 = true;
+ }
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
u32 ctrl = le32_to_cpu(__ctrl);
- if (!e->txwi || !e->skb) {
+ if (!e->skip_buf0) {
__le32 addr = READ_ONCE(q->desc[idx].buf0);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
struct urb *urb;
};
enum mt76_txq_id qid;
- bool schedule;
- bool done;
+ bool skip_buf0:1;
+ bool schedule:1;
+ bool done:1;
};
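
The new skip_buf0 flag above arrives together with a conversion of the entry's plain bools into one-bit bitfields, so the extra flag costs no extra storage. A tiny sketch of the packing, assuming nothing beyond standard C99::

    #include <stdbool.h>
    #include <stdio.h>

    struct entry_flags {
            bool skip_buf0:1;       /* one-bit flags typically share a byte */
            bool schedule:1;
            bool done:1;
    };

    int main(void)
    {
            struct entry_flags f = { .skip_buf0 = true };

            printf("sizeof=%zu skip_buf0=%d\n", sizeof(f), (int)f.skip_buf0);
            return 0;
    }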
struct mt76_queue_regs {
#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+ mt76_pci_disable_aspm(pdev);
+
return 0;
error:
--- /dev/null
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_conf, parent_aspm_conf = 0;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+ aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspm_conf);
+ parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+ /* aspm already disabled */
+ return;
+ }
+
+ dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+ if (IS_ENABLED(CONFIG_PCIEASPM)) {
+ int err;
+
+ err = pci_disable_link_state(pdev, aspm_conf);
+ if (!err)
+ return;
+ }
+
+ /* both device and parent should have the same ASPM setting.
+ * disable ASPM in downstream component first and then upstream.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
- if (!stats.crc && !stats.hwerror) {
+ if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
}
} else {
+ /* drop packets with errors or those too short */
dev_kfree_skb_any(skb);
}
new_trx_end:
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
priv->is_connected = false;
priv->is_up = false;
INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+ __module_get(THIS_MODULE);
return 0;
unregister_netdev:
netdev_upper_dev_unlink(priv->lowerdev, dev);
unregister_netdevice_queue(dev, head);
+ module_put(THIS_MODULE);
/* Deleting the wiphy is handled in the module destructor. */
}
.priv_size = sizeof(struct virt_wifi_netdev_priv),
};
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+ return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+ struct virt_wifi_netdev_priv *priv;
+ struct net_device *upper_dev;
+ LIST_HEAD(list_kill);
+
+ if (!netif_is_virt_wifi_dev(lower_dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ priv = rtnl_dereference(lower_dev->rx_handler_data);
+ if (!priv)
+ return NOTIFY_DONE;
+
+ upper_dev = priv->upperdev;
+
+ upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+ .notifier_call = virt_wifi_event,
+};
+
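
virt_wifi now registers a netdevice notifier so it can tear down its upper device when the lower device unregisters. A minimal user-space sketch of a notifier chain of this shape; the list handling is deliberately simplified compared to the kernel's notifier API::

    #include <stdio.h>

    #define NOTIFY_DONE 0

    struct notifier_block;
    typedef int (*notifier_fn)(struct notifier_block *nb, unsigned long event,
                               void *data);

    struct notifier_block {
            notifier_fn notifier_call;
            struct notifier_block *next;
    };

    static struct notifier_block *chain;

    static void register_notifier(struct notifier_block *nb)
    {
            nb->next = chain;
            chain = nb;
    }

    static void call_chain(unsigned long event, void *data)
    {
            for (struct notifier_block *nb = chain; nb; nb = nb->next)
                    nb->notifier_call(nb, event, data);
    }

    static int on_event(struct notifier_block *nb, unsigned long event,
                        void *data)
    {
            printf("event %lu on %s\n", event, (const char *)data);
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = { .notifier_call = on_event };

    int main(void)
    {
            register_notifier(&my_nb);
            call_chain(1 /* arbitrary event code */, "lower0");
            return 0;
    }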
/* Acquires and releases the rtnl lock. */
static int __init virt_wifi_init_module(void)
{
/* Guaranteed to be locallly-administered and not multicast. */
eth_random_addr(fake_router_bssid);
+ err = register_netdevice_notifier(&virt_wifi_notifier);
+ if (err)
+ return err;
+
+ err = -ENOMEM;
common_wiphy = virt_wifi_make_wiphy();
if (!common_wiphy)
- return -ENOMEM;
+ goto notifier;
err = rtnl_link_register(&virt_wifi_link_ops);
if (err)
- virt_wifi_destroy_wiphy(common_wiphy);
+ goto destroy_wiphy;
+ return 0;
+
+destroy_wiphy:
+ virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+ unregister_netdevice_notifier(&virt_wifi_notifier);
return err;
}
/* Will delete any devices that depend on the wiphy. */
rtnl_link_unregister(&virt_wifi_link_ops);
virt_wifi_destroy_wiphy(common_wiphy);
+ unregister_netdevice_notifier(&virt_wifi_notifier);
}
module_init(virt_wifi_init_module);
return 0;
}
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;
int error;
mutex_lock(&ctrl->ana_lock);
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
- groups_only ? NVME_ANA_LOG_RGO : 0,
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- nvme_read_ana_log(ctrl, false);
+ nvme_read_ana_log(ctrl);
}
static void nvme_anatt_timeout(struct timer_list *t)
goto out;
}
- error = nvme_read_ana_log(ctrl, true);
+ error = nvme_read_ana_log(ctrl);
if (error)
goto out_free_ana_log_buf;
return 0;
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
- if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+ if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
return queue->nr_cqe;
if (!target)
return -ENODEV;
- if (!of_device_is_available(target))
+ if (!of_device_is_available(target)) {
+ of_node_put(target);
return 0;
+ }
rmem = __find_rmem(target);
of_node_put(target);
of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ kfree(unittest_data);
return -ENODATA;
}
goto free_regulators;
}
- ret = regulator_enable(reg);
- if (ret < 0) {
- regulator_put(reg);
- goto free_regulators;
- }
-
opp_table->regulators[i] = reg;
}
return opp_table;
free_regulators:
- while (i--) {
- regulator_disable(opp_table->regulators[i]);
- regulator_put(opp_table->regulators[i]);
- }
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
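
The OPP hunk above drops the regulator_enable()/disable() calls and tightens the error path so it releases only the regulators actually acquired, newest first. The same unwind shape in plain C, with malloc standing in for regulator_get() and one acquisition made to fail on purpose::

    #include <stdio.h>
    #include <stdlib.h>

    #define N 5

    static int acquire_all(void **res)
    {
            int i;

            for (i = 0; i < N; i++) {
                    res[i] = (i == 3) ? NULL : malloc(16); /* simulate failure */
                    if (!res[i])
                            goto free_acquired;
            }
            return 0;

    free_acquired:
            /* Release only what was acquired, in reverse order. */
            while (i != 0)
                    free(res[--i]);
            return -1;
    }

    int main(void)
    {
            void *res[N];

            printf("acquire_all() = %d\n", acquire_all(res));
            return 0;
    }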
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- for (i = opp_table->regulator_count - 1; i >= 0; i--) {
- regulator_disable(opp_table->regulators[i]);
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- }
_free_set_opp_data(opp_table);
{
struct dev_pm_opp *opp;
- lockdep_assert_held(&opp_table_lock);
-
mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
return 0;
}
+ /*
+ * Re-initialize list_kref every time we add static OPPs to the OPP
+ * table as the reference count may be 0 after the last time static OPPs
+ * were removed.
+ */
+ kref_init(&opp_table->list_kref);
+
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
opp = _opp_add_static_v2(opp_table, dev, np);
#define K25 7
SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
FUNC_GROUP_DECL(MACLINK4, K25);
#define AD11 206
SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 14));
+ SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
#define AF10 207
SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 15));
+ SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
#define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
#define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
#define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
#define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
#define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
#define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
#define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
#define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
#define Y1 240
SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
#define Y2 241
SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
#define Y3 242
SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
#define Y4 243
SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
* FIXME: Confirm bits and priorities are the right way around for the
* following 4 pins
*/
#define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
#define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
FUNC_GROUP_DECL(FSI1, AF25, AE26);
#define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
#define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
+ ASPEED_PINCTRL_PIN(AA16),
+ ASPEED_PINCTRL_PIN(AA17),
ASPEED_PINCTRL_PIN(AA23),
ASPEED_PINCTRL_PIN(AA24),
ASPEED_PINCTRL_PIN(AA25),
ASPEED_PINCTRL_PIN(AB11),
ASPEED_PINCTRL_PIN(AB12),
ASPEED_PINCTRL_PIN(AB15),
+ ASPEED_PINCTRL_PIN(AB16),
+ ASPEED_PINCTRL_PIN(AB17),
ASPEED_PINCTRL_PIN(AB18),
ASPEED_PINCTRL_PIN(AB19),
ASPEED_PINCTRL_PIN(AB22),
ASPEED_PINCTRL_PIN(AC11),
ASPEED_PINCTRL_PIN(AC12),
ASPEED_PINCTRL_PIN(AC15),
+ ASPEED_PINCTRL_PIN(AC16),
ASPEED_PINCTRL_PIN(AC17),
ASPEED_PINCTRL_PIN(AC18),
ASPEED_PINCTRL_PIN(AC19),
ASPEED_PINCTRL_PIN(AD12),
ASPEED_PINCTRL_PIN(AD14),
ASPEED_PINCTRL_PIN(AD15),
+ ASPEED_PINCTRL_PIN(AD16),
ASPEED_PINCTRL_PIN(AD19),
ASPEED_PINCTRL_PIN(AD20),
ASPEED_PINCTRL_PIN(AD22),
ASPEED_PINCTRL_PIN(AE12),
ASPEED_PINCTRL_PIN(AE14),
ASPEED_PINCTRL_PIN(AE15),
+ ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE25),
+ ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
ASPEED_PINCTRL_PIN(AE8),
ASPEED_PINCTRL_PIN(AF10),
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF24),
+ ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
ASPEED_PINCTRL_PIN(AF8),
ASPEED_PINCTRL_PIN(AF9),
ASPEED_PINCTRL_PIN(Y3),
ASPEED_PINCTRL_PIN(Y4),
ASPEED_PINCTRL_PIN(Y5),
- ASPEED_PINCTRL_PIN(AB16),
- ASPEED_PINCTRL_PIN(AA17),
- ASPEED_PINCTRL_PIN(AB17),
- ASPEED_PINCTRL_PIN(AE16),
- ASPEED_PINCTRL_PIN(AC16),
- ASPEED_PINCTRL_PIN(AA16),
- ASPEED_PINCTRL_PIN(AD16),
- ASPEED_PINCTRL_PIN(AF25),
- ASPEED_PINCTRL_PIN(AE26),
- ASPEED_PINCTRL_PIN(AE25),
- ASPEED_PINCTRL_PIN(AF24),
};
static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(SALT9G1),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SD2),
- ASPEED_PINCTRL_GROUP(SD3),
- ASPEED_PINCTRL_GROUP(SD3DAT4),
- ASPEED_PINCTRL_GROUP(SD3DAT5),
- ASPEED_PINCTRL_GROUP(SD3DAT6),
- ASPEED_PINCTRL_GROUP(SD3DAT7),
+ ASPEED_PINCTRL_GROUP(EMMCG1),
+ ASPEED_PINCTRL_GROUP(EMMCG4),
+ ASPEED_PINCTRL_GROUP(EMMCG8),
ASPEED_PINCTRL_GROUP(SGPM1),
ASPEED_PINCTRL_GROUP(SGPS1),
ASPEED_PINCTRL_GROUP(SIOONCTRL),
ASPEED_PINCTRL_FUNC(ADC8),
ASPEED_PINCTRL_FUNC(ADC9),
ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(EMMC),
ASPEED_PINCTRL_FUNC(ESPI),
ASPEED_PINCTRL_FUNC(ESPIALT),
ASPEED_PINCTRL_FUNC(FSI1),
ASPEED_PINCTRL_FUNC(SALT9),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SD2),
- ASPEED_PINCTRL_FUNC(SD3),
- ASPEED_PINCTRL_FUNC(SD3DAT4),
- ASPEED_PINCTRL_FUNC(SD3DAT5),
- ASPEED_PINCTRL_FUNC(SD3DAT6),
- ASPEED_PINCTRL_FUNC(SD3DAT7),
ASPEED_PINCTRL_FUNC(SGPM1),
ASPEED_PINCTRL_FUNC(SGPS1),
ASPEED_PINCTRL_FUNC(SIOONCTRL),
* @idx: The bit index in the register
*/
#define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
#define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
#define SIG_DESC_LIST_DECL(sig, group, ...) \
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
#define FUNC_GROUP_DECL(func, ...) \
GROUP_DECL(func, __VA_ARGS__); \
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
- if (irq) {
+ if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
const struct ns2_pin_function *func;
const struct ns2_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
- BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{}
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
* @soc: SoC/PCH specific pin configuration data
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
+ struct irq_chip irqchip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
return ret;
}
-static struct irq_chip intel_gpio_irqchip = {
- .name = "intel-gpio",
- .irq_ack = intel_gpio_irq_ack,
- .irq_mask = intel_gpio_irq_mask,
- .irq_unmask = intel_gpio_irq_unmask,
- .irq_set_type = intel_gpio_irq_type,
- .irq_set_wake = intel_gpio_irq_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
pctrl->chip = intel_gpio_chip;
+ /* Setup GPIO chip */
pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
pctrl->irq = irq;
+ /* Setup IRQ chip */
+ pctrl->irqchip.name = dev_name(pctrl->dev);
+ pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+ pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+ pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+ pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+ pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+ pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+ ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
- NULL);
+ gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
};
};
static inline void armada_37xx_update_reg(unsigned int *reg,
- unsigned int offset)
+ unsigned int *offset)
{
/* We never have more than 2 registers */
- if (offset >= GPIO_PER_REG) {
- offset -= GPIO_PER_REG;
+ if (*offset >= GPIO_PER_REG) {
+ *offset -= GPIO_PER_REG;
*reg += sizeof(u32);
}
}
{
int offset = irqd_to_hwirq(d);
- armada_37xx_update_reg(reg, offset);
+ armada_37xx_update_reg(reg, &offset);
}
static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
unsigned int reg = OUTPUT_EN;
unsigned int mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
return regmap_update_bits(info->regmap, reg, mask, 0);
unsigned int reg = OUTPUT_EN;
unsigned int val, mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
unsigned int reg = OUTPUT_EN;
unsigned int mask, val, ret;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
ret = regmap_update_bits(info->regmap, reg, mask, mask);
unsigned int reg = INPUT_VAL;
unsigned int val, mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
unsigned int reg = OUTPUT_VAL;
unsigned int mask, val;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
val = value ? mask : 0;
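
The armada-37xx fix above changes the helper to take the offset by pointer: previously the remapped offset was lost on return, so callers built BIT(offset) from the wrong bit for GPIOs past the first register. A sketch of the corrected out-parameter shape::

    #include <stdio.h>

    #define GPIO_PER_REG 32

    /* The helper must adjust both the register and the offset; taking the
     * offset by pointer lets the caller use the remapped value afterwards. */
    static void update_reg(unsigned int *reg, unsigned int *offset)
    {
            if (*offset >= GPIO_PER_REG) {
                    *offset -= GPIO_PER_REG;
                    *reg += sizeof(unsigned int);
            }
    }

    int main(void)
    {
            unsigned int reg = 0x18, offset = 35;

            update_reg(&reg, &offset);
            printf("reg=0x%x offset=%u (mask=0x%x)\n",
                   reg, offset, 1u << offset);
            return 0;
    }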
static int stmfx_pinctrl_remove(struct platform_device *pdev)
{
- struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+ struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
return stmfx_function_disable(stmfx,
STMFX_FUNC_GPIO |
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDTCM
+ tristate "IDT CLOCKMATRIX as PTP clock"
+ select PTP_1588_CLOCK
+ default n
+ help
+ This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_clockmatrix.
+
endmenu
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define STATUS 0xc03c
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+
+#define TOD_WRITE_0 0xcc00
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+
+#include "ptp_private.h"
+#include "ptp_clockmatrix.h"
+
+MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+#define SETTIME_CORRECTION (0)
+
+static int char_array_to_timespec(u8 *buf,
+ u8 count,
+ struct timespec64 *ts)
+{
+ u8 i;
+ u64 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ nsec = buf[4];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[3 - i];
+ }
+
+ sec = buf[10];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[9 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
+static int timespec_to_char_array(struct timespec64 const *ts,
+ u8 *buf,
+ u8 count)
+{
+ u8 i;
+ s32 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ buf[0] = 0;
+ for (i = 1; i < 5; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 5; i < TOD_BYTE_COUNT; i++) {
+
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+
+ return 0;
+}
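
The two helpers above serialize a struct timespec64 into the chip's 11-byte
time-of-day format and back: buf[0] carries sub-nanoseconds, buf[1..4] a
little-endian 32-bit nanosecond field, and buf[5..10] a little-endian 48-bit
seconds field. A minimal userspace sketch of that layout (standard integer
types and example values only; not driver code)::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t buf[11] = {0};
            int64_t sec = 1572531042;       /* example seconds value */
            int32_t nsec = 500000000;       /* 0.5 s */
            int i;

            buf[0] = 0;                     /* sub-nanoseconds unused here */
            for (i = 1; i < 5; i++) {       /* nanoseconds, LSB first */
                    buf[i] = nsec & 0xff;
                    nsec >>= 8;
            }
            for (i = 5; i < 11; i++) {      /* seconds, LSB first */
                    buf[i] = sec & 0xff;
                    sec >>= 8;
            }
            printf("ns bytes: %02x %02x %02x %02x\n",
                   buf[1], buf[2], buf[3], buf[4]);
            return 0;
    }
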
+
+static int idtcm_xfer(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ struct i2c_client *client = idtcm->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+	msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
+{
+ u8 buf[4];
+ int err;
+
+ if (idtcm->page_offset == val)
+ return 0;
+
+ buf[0] = 0x0;
+ buf[1] = val;
+ buf[2] = 0x10;
+ buf[3] = 0x20;
+
+ err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+
+ if (err)
+ dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ else
+ idtcm->page_offset = val;
+
+ return err;
+}
+
+static int _idtcm_rdwr(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ u8 hi;
+ u8 lo;
+ int err;
+
+ hi = (regaddr >> 8) & 0xff;
+ lo = regaddr & 0xff;
+
+ err = idtcm_page_offset(idtcm, hi);
+
+ if (err)
+ goto out;
+
+ err = idtcm_xfer(idtcm, lo, buf, count, write);
+out:
+ return err;
+}
+
+static int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
+}
+
+static int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+}
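
These wrappers encode the chip's paged addressing: a module base plus a
register offset forms a 16-bit address, _idtcm_rdwr() writes the high byte to
PAGE_ADDR (cached in page_offset so repeated accesses to the same page skip
the extra transfer), and only the low byte is sent as the I2C register. A
sketch of the split, using one register pair from the header as an example
(plain arithmetic, no I2C involved)::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example target: DPLL_FREQ_0 (0xc838) + DPLL_WR_FREQ (0x0000) */
            uint16_t full = 0xc838 + 0x0000;
            uint8_t page = (full >> 8) & 0xff;  /* written to PAGE_ADDR */
            uint8_t lo = full & 0xff;           /* the I2C register offset */

            printf("page 0x%02x, offset 0x%02x\n", page, lo);
            return 0;
    }
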
+
+static int _idtcm_gettime(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
+ trigger |= TOD_READ_TRIGGER_MODE;
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = char_array_to_timespec(buf, sizeof(buf), ts);
+
+ return err;
+}
+
+static int _sync_pll_output(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 sync_ctrl0;
+ u16 sync_ctrl1;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
+ break;
+ case 1:
+ sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
+ break;
+ case 2:
+ sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
+ break;
+ case 3:
+ sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
+ break;
+ case 4:
+ sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
+ break;
+ case 5:
+ sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
+ break;
+ case 6:
+ sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
+ break;
+ case 7:
+ sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = SYNCTRL1_MASTER_SYNC_RST;
+
+ /* Place master sync in reset */
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
+ if (err)
+ return err;
+
+ /* Set sync trigger mask */
+ val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
+
+ if (qn)
+ val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
+
+ if (qn_plus_1)
+ val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ /* Place master sync out of reset */
+ val &= ~(SYNCTRL1_MASTER_SYNC_RST);
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+
+ return err;
+}
+
+static int idtcm_sync_pps_output(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ int err = 0;
+
+ u16 output_mask = channel->output_mask;
+
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case DPLL_1:
+ sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case DPLL_2:
+ sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case DPLL_3:
+ sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (pll = 0; pll < 8; pll++) {
+
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+
+ if (pll < 4) {
+			/* The first 4 PLLs have two outputs each */
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else {
+ qn_plus_1 = 0;
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _sync_pll_output(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[TOD_BYTE_COUNT];
+ u8 cmd;
+ int err;
+ struct timespec64 local_ts = *ts;
+ s64 total_overhead_ns;
+
+ /* Configure HW TOD write trigger. */
+ err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ cmd &= ~(0x0f);
+ cmd |= wr_trig | 0x08;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+ }
+
+ /* ARM HW TOD write trigger. */
+ cmd &= ~(0x08);
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
+
+ if (idtcm->calculate_overhead_flag) {
+ total_overhead_ns = ktime_to_ns(ktime_get_raw()
+ - idtcm->start_time)
+ + idtcm->tod_write_overhead_ns
+ + SETTIME_CORRECTION;
+
+ timespec64_add_ns(&local_ts, total_overhead_ns);
+
+ idtcm->calculate_overhead_flag = 0;
+ }
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+ }
+
+ return err;
+}
+
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ s32 retval;
+ int err;
+ int i;
+ u8 trig_sel;
+
+ err = _idtcm_set_dpll_tod(channel, ts, wr_trig);
+
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ for (i = 0; i < 10000; i++) {
+ err = idtcm_read(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_CTRL_1, &trig_sel,
+ sizeof(trig_sel));
+
+ if (err)
+ return err;
+
+ if (trig_sel == 0x4a)
+ break;
+
+ err = 1;
+ }
+
+ if (err)
+ return err;
+
+ retval = idtcm_sync_pps_output(channel);
+
+ return retval;
+}
+
+static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
+ s32 offset_ns)
+{
+ int err;
+ int i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[4];
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = 0xff & (offset_ns);
+ offset_ns >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
+ u32 max_ffo_ppb)
+{
+ int err;
+ u8 i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[3];
+
+ if (max_ffo_ppb & 0xff000000)
+ max_ffo_ppb = 0;
+
+ for (i = 0; i < 3; i++) {
+ buf[i] = 0xff & (max_ffo_ppb);
+ max_ffo_ppb >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf;
+
+ err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
+ &buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ if (buf == 0) {
+ buf = 0x01;
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_CTRL, &buf, sizeof(buf));
+ } else {
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
+{
+ int err;
+
+ err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
+
+ if (err)
+ return err;
+
+ err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
+
+ if (err)
+ return err;
+
+ err = idtcm_start_phase_pull_in(channel);
+
+ return err;
+}
+
+static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ s64 now;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ } else {
+ idtcm->calculate_overhead_flag = 1;
+
+ err = _idtcm_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now = timespec64_to_ns(&ts);
+ now += delta;
+
+ ts = ns_to_timespec64(now);
+
+ err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ }
+
+ return err;
+}
+
+static int idtcm_state_machine_reset(struct idtcm *idtcm)
+{
+ int err;
+ u8 byte = SM_RESET_CMD;
+
+ err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+
+ if (!err)
+ msleep_interruptible(POST_SM_RESET_DELAY_MS);
+
+ return err;
+}
+
+static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HW_REV_ID,
+ hw_rev_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ BOND_ID,
+ bond_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
+
+ *hw_csr_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
+
+ *hw_irq_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
+
+ *product_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
+{
+ int err;
+ u8 buf = 0;
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
+
+ *major = buf >> 1;
+
+ return err;
+}
+
+static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
+}
+
+static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HOTFIX_REL,
+ hotfix,
+ sizeof(u8));
+}
+
+static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm,
+ GENERAL_STATUS,
+ PIPELINE_ID,
+ &buf[0],
+ sizeof(buf));
+
+ *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
+{
+ int err = 0;
+
+ if (addr == PLL_MASK_ADDR) {
+ if ((val & 0xf0) || !(val & 0xf)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ }
+ *mask = val;
+ }
+
+ return err;
+}
+
+static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
+{
+ int err = 0;
+
+ switch (addr) {
+ case OUTPUT_MASK_PLL0_ADDR:
+ SET_U16_LSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL0_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR:
+ SET_U16_LSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR:
+ SET_U16_LSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR:
+ SET_U16_LSB(idtcm->channel[3].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[3].output_mask, val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int check_and_set_masks(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 val)
+{
+ int err = 0;
+
+ if (set_pll_output_mask(idtcm, regaddr, val)) {
+ /* Not an output mask, check for pll mask */
+ err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask);
+ }
+
+ return err;
+}
+
+static void display_pll_and_output_masks(struct idtcm *idtcm)
+{
+ u8 i;
+ u8 mask;
+
+ dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idtcm->pll_mask)
+ dev_dbg(&idtcm->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].output_mask);
+ }
+}
+
+static int idtcm_load_firmware(struct idtcm *idtcm,
+ struct device *dev)
+{
+ const struct firmware *fw;
+ struct idtcm_fwrc *rec;
+ u32 regaddr;
+ int err;
+ s32 len;
+ u8 val;
+ u8 loaddr;
+
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtcm_fwrc *) fw->data;
+
+ if (fw->size > 0)
+ idtcm_state_machine_reset(idtcm);
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idtcm->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
+
+ val = rec->value;
+ loaddr = rec->loaddr;
+
+ rec++;
+
+ err = check_and_set_masks(idtcm, regaddr, val);
+ }
+
+ if (err == 0) {
+ /* Top (status registers) and bottom are read-only */
+ if ((regaddr < GPIO_USER_CONTROL)
+ || (regaddr >= SCRATCH))
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ display_pll_and_output_masks(idtcm);
+
+out:
+ release_firmware(fw);
+ return err;
+}
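
The firmware image parsed above is a flat array of 4-byte records matching
struct idtcm_fwrc: high address byte, low address byte, value, and a reserved
byte that must be zero. A standalone sketch walking such an image
(hypothetical two-record buffer; the addresses are taken from the driver
header)::

    #include <stdint.h>
    #include <stdio.h>

    struct fwrc {                   /* mirrors struct idtcm_fwrc */
            uint8_t hiaddr;
            uint8_t loaddr;
            uint8_t value;
            uint8_t reserved;       /* non-zero means a bad image */
    };

    int main(void)
    {
            /* 0xffa5 = PLL_MASK_ADDR, 0xffb0 = OUTPUT_MASK_PLL0_ADDR */
            uint8_t image[] = { 0xff, 0xa5, 0x04, 0x00,
                                0xff, 0xb0, 0x03, 0x00 };
            struct fwrc *rec = (struct fwrc *)image;
            long len;

            for (len = sizeof(image); len > 0; len -= sizeof(*rec), rec++) {
                    uint16_t regaddr = (rec->hiaddr << 8) | rec->loaddr;

                    if (rec->reserved)
                            return 1;       /* bad firmware record */
                    printf("write 0x%02x -> 0x%04x\n", rec->value, regaddr);
            }
            return 0;
    }
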
+
+static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u32 module;
+ u8 val;
+ int err;
+
+ /*
+ * This assumes that the 1-PPS is on the second of the two
+	 * outputs. But is this always true?
+ */
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ module = OUTPUT_1;
+ break;
+ case DPLL_1:
+ module = OUTPUT_3;
+ break;
+ case DPLL_2:
+ module = OUTPUT_5;
+ break;
+ case DPLL_3:
+ module = OUTPUT_7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= SQUELCH_DISABLE;
+ else
+ val &= ~SQUELCH_DISABLE;
+
+ err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int idtcm_set_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+
+ channel->pll_mode = pll_mode;
+
+ err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* PTP Hardware Clock interface */
+
+static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ u8 i;
+ bool neg_adj = 0;
+ int err;
+ u8 buf[6] = {0};
+ s64 fcw;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Frequency Control Word unit is: 1.11 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 111
+ *
+ * adjfine:
+ * ppm_16 * 5^12
+ * FCW = -------------
+ * 111 * 2^4
+ */
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
+ fcw = ppb * 1000000000000ULL;
+
+ fcw = div_u64(fcw, 111022);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 6; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
+ buf, sizeof(buf));
+
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+}
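
The conversion above follows the formula in the comment: with an FCW step of
roughly 1.11 * 10^-10 ppm, the control word is the requested offset in ppb
scaled by 10^12 and divided by 111022. A worked example with a made-up
adjustment (plain C arithmetic only)::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t ppb = 10000;    /* example request: +10 ppm */
            int64_t fcw = ppb * 1000000000000LL / 111022;

            /* about 9.007e10 FCW units for a 10 ppm adjustment */
            printf("fcw = %lld\n", (long long)fcw);
            return 0;
    }
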
+
+static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_gettime(channel, ts);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjtime(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ return idtcm_pps_enable(channel, false);
+
+ /* Only accept a 1-PPS aligned to the second. */
+ if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ return -ERANGE;
+
+ return idtcm_pps_enable(channel, true);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
+ err = idtcm_pps_enable(channel, false);
+ if (err)
+ return err;
+
+ /*
+ * Start the TOD clock ticking.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg |= TOD_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+}
+
+static void idtcm_display_version_info(struct idtcm *idtcm)
+{
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u32 pipeline;
+ u16 product_id;
+ u16 csr_id;
+ u16 irq_id;
+ u8 hw_rev_id;
+ u8 bond_id;
+
+ idtcm_read_major_release(idtcm, &major);
+ idtcm_read_minor_release(idtcm, &minor);
+ idtcm_read_hotfix_release(idtcm, &hotfix);
+ idtcm_read_pipeline(idtcm, &pipeline);
+
+ idtcm_read_product_id(idtcm, &product_id);
+ idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
+ idtcm_read_bond_id(idtcm, &bond_id);
+ idtcm_read_hw_csr_id(idtcm, &csr_id);
+ idtcm_read_hw_irq_id(idtcm, &irq_id);
+
+ dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u\t"
+ "0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
+ major, minor, hotfix, pipeline,
+ product_id, hw_rev_id, bond_id, csr_id, irq_id);
+}
+
+static struct ptp_clock_info idtcm_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 1,
+ .adjfreq = &idtcm_adjfreq,
+ .adjtime = &idtcm_adjtime,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime,
+ .enable = &idtcm_enable,
+};
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_PHC_PLL))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ switch (index) {
+ case 0:
+ channel->dpll_freq = DPLL_FREQ_0;
+ channel->dpll_n = DPLL_0;
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ channel->hw_dpll_n = HW_DPLL_0;
+ channel->dpll_phase = DPLL_PHASE_0;
+ channel->dpll_ctrl_n = DPLL_CTRL_0;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
+ break;
+ case 1:
+ channel->dpll_freq = DPLL_FREQ_1;
+ channel->dpll_n = DPLL_1;
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ channel->hw_dpll_n = HW_DPLL_1;
+ channel->dpll_phase = DPLL_PHASE_1;
+ channel->dpll_ctrl_n = DPLL_CTRL_1;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
+ break;
+ case 2:
+ channel->dpll_freq = DPLL_FREQ_2;
+ channel->dpll_n = DPLL_2;
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ channel->hw_dpll_n = HW_DPLL_2;
+ channel->dpll_phase = DPLL_PHASE_2;
+ channel->dpll_ctrl_n = DPLL_CTRL_2;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
+ break;
+ case 3:
+ channel->dpll_freq = DPLL_FREQ_3;
+ channel->dpll_n = DPLL_3;
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ channel->hw_dpll_n = HW_DPLL_3;
+ channel->dpll_phase = DPLL_PHASE_3;
+ channel->dpll_ctrl_n = DPLL_CTRL_3;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ channel->idtcm = idtcm;
+
+ channel->caps = idtcm_caps;
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT CM PLL%u", index);
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+
+ err = idtcm_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static void ptp_clock_unregister_all(struct idtcm *idtcm)
+{
+ u8 i;
+ struct idtcm_channel *channel;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idtcm->channel[i];
+
+ if (channel->ptp_clock)
+ ptp_clock_unregister(channel->ptp_clock);
+ }
+}
+
+static void set_default_masks(struct idtcm *idtcm)
+{
+ idtcm->pll_mask = DEFAULT_PLL_MASK;
+
+ idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
+ idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
+}
+
+static int set_tod_write_overhead(struct idtcm *idtcm)
+{
+ int err;
+ u8 i;
+
+ s64 total_ns = 0;
+
+ ktime_t start;
+ ktime_t stop;
+
+ char buf[TOD_BYTE_COUNT];
+
+ struct idtcm_channel *channel = &idtcm->channel[2];
+
+ /* Set page offset */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop - start);
+ }
+
+ idtcm->tod_write_overhead_ns = div_s64(total_ns,
+ TOD_WRITE_OVERHEAD_COUNT_MAX);
+
+ return err;
+}
+
+static int idtcm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idtcm *idtcm;
+ int err;
+ u8 i;
+
+ /* Unused for now */
+ (void)id;
+
+ idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+
+ if (!idtcm)
+ return -ENOMEM;
+
+ idtcm->client = client;
+ idtcm->page_offset = 0xff;
+ idtcm->calculate_overhead_flag = 0;
+
+ set_default_masks(idtcm);
+
+ mutex_init(&idtcm->reg_lock);
+ mutex_lock(&idtcm->reg_lock);
+
+ idtcm_display_version_info(idtcm);
+
+ err = set_tod_write_overhead(idtcm);
+
+ if (err)
+ return err;
+
+ err = idtcm_load_firmware(idtcm, &client->dev);
+
+ if (err)
+ dev_warn(&idtcm->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idtcm->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idtcm->pll_mask & (1 << i)) {
+ err = idtcm_enable_channel(idtcm, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idtcm->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ if (err) {
+ ptp_clock_unregister_all(idtcm);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idtcm);
+
+ return 0;
+}
+
+static int idtcm_remove(struct i2c_client *client)
+{
+ struct idtcm *idtcm = i2c_get_clientdata(client);
+
+ ptp_clock_unregister_all(idtcm);
+
+ mutex_destroy(&idtcm->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idtcm_dt_id[] = {
+ { .compatible = "idt,8a34000" },
+ { .compatible = "idt,8a34001" },
+ { .compatible = "idt,8a34002" },
+ { .compatible = "idt,8a34003" },
+ { .compatible = "idt,8a34004" },
+ { .compatible = "idt,8a34005" },
+ { .compatible = "idt,8a34006" },
+ { .compatible = "idt,8a34007" },
+ { .compatible = "idt,8a34008" },
+ { .compatible = "idt,8a34009" },
+ { .compatible = "idt,8a34010" },
+ { .compatible = "idt,8a34011" },
+ { .compatible = "idt,8a34012" },
+ { .compatible = "idt,8a34013" },
+ { .compatible = "idt,8a34014" },
+ { .compatible = "idt,8a34015" },
+ { .compatible = "idt,8a34016" },
+ { .compatible = "idt,8a34017" },
+ { .compatible = "idt,8a34018" },
+ { .compatible = "idt,8a34019" },
+ { .compatible = "idt,8a34040" },
+ { .compatible = "idt,8a34041" },
+ { .compatible = "idt,8a34042" },
+ { .compatible = "idt,8a34043" },
+ { .compatible = "idt,8a34044" },
+ { .compatible = "idt,8a34045" },
+ { .compatible = "idt,8a34046" },
+ { .compatible = "idt,8a34047" },
+ { .compatible = "idt,8a34048" },
+ { .compatible = "idt,8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idtcm_dt_id);
+#endif
+
+static const struct i2c_device_id idtcm_i2c_id[] = {
+ { "8a34000" },
+ { "8a34001" },
+ { "8a34002" },
+ { "8a34003" },
+ { "8a34004" },
+ { "8a34005" },
+ { "8a34006" },
+ { "8a34007" },
+ { "8a34008" },
+ { "8a34009" },
+ { "8a34010" },
+ { "8a34011" },
+ { "8a34012" },
+ { "8a34013" },
+ { "8a34014" },
+ { "8a34015" },
+ { "8a34016" },
+ { "8a34017" },
+ { "8a34018" },
+ { "8a34019" },
+ { "8a34040" },
+ { "8a34041" },
+ { "8a34042" },
+ { "8a34043" },
+ { "8a34044" },
+ { "8a34045" },
+ { "8a34046" },
+ { "8a34047" },
+ { "8a34048" },
+ { "8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
+
+static struct i2c_driver idtcm_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idtcm_dt_id),
+ .name = "idtcm",
+ },
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
+ .id_table = idtcm_i2c_id,
+};
+
+module_i2c_driver(idtcm_driver);
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTCLOCKMATRIX_H
+#define PTP_IDTCLOCKMATRIX_H
+
+#include <linux/ktime.h>
+
+#include "idt8a340_reg.h"
+
+#define FW_FILENAME "idtcm.bin"
+#define MAX_PHC_PLL 4
+
+#define PLL_MASK_ADDR (0xFFA5)
+#define DEFAULT_PLL_MASK (0x04)
+
+#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
+#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | (val8 << 8))
+
+#define OUTPUT_MASK_PLL0_ADDR (0xFFB0)
+#define OUTPUT_MASK_PLL1_ADDR (0xFFB2)
+#define OUTPUT_MASK_PLL2_ADDR (0xFFB4)
+#define OUTPUT_MASK_PLL3_ADDR (0xFFB6)
+
+#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
+#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
+#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
+#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS (150000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define TOD_BYTE_COUNT (11)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+struct idtcm;
+
+struct idtcm_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idtcm *idtcm;
+ u16 dpll_phase;
+ u16 dpll_freq;
+ u16 dpll_n;
+ u16 dpll_ctrl_n;
+ u16 dpll_phase_pull_in;
+ u16 tod_read_primary;
+ u16 tod_write;
+ u16 tod_n;
+ u16 hw_dpll_n;
+ enum pll_mode pll_mode;
+ u16 output_mask;
+};
+
+struct idtcm {
+ struct idtcm_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+
+ /* Overhead calculation for adjtime */
+ u8 calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ ktime_t start_time;
+
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+struct idtcm_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+#endif /* PTP_IDTCLOCKMATRIX_H */
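
As a small illustration of how set_pll_output_mask() uses the SET_U16_LSB()
and SET_U16_MSB() helpers defined above, the following assembles a 16-bit
output mask from two consecutive single-byte firmware writes (macros copied
from the header; the example bytes match DEFAULT_OUTPUT_MASK_PLL0)::

    #include <stdint.h>
    #include <stdio.h>

    #define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
    #define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | (val8 << 8))

    int main(void)
    {
            uint16_t output_mask = 0;

            SET_U16_LSB(output_mask, 0x03);  /* byte at OUTPUT_MASK_PLL0_ADDR */
            SET_U16_MSB(output_mask, 0x00);  /* byte at the address above + 1 */

            printf("output_mask = 0x%04x\n", output_mask);  /* 0x0003 */
            return 0;
    }
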
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
&rdev->dev.of_node);
+
+ /*
+ * Sometimes not all resources are probed already so we need to take
+	 * that into account. This happens most of the time if the ena_gpiod comes
+ * from a gpio extender or something else.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ kfree(config);
+ kfree(rdev);
+ ret = -EPROBE_DEFER;
+ goto rinse;
+ }
+
/*
* We need to keep track of any GPIO descriptor coming from the
* device tree until we have handled it over to the core. If the
static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
unsigned int val, mode = 0;
int ret;
return REGULATOR_MODE_NORMAL;
}
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
int ret, val;
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK2,
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK3,
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_LDO1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK2,
__builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK3,
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK4,
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_LDO1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
struct device *dev = &pdev->dev;
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
- const struct fixed_dev_type *drvtype =
- of_match_device(dev->driver->of_match_table, dev)->data;
+ const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
struct regulator_config cfg = { };
enum gpiod_flags gflags;
int ret;
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
- if (drvtype->has_enable_clock) {
+ if (drvtype && drvtype->has_enable_clock) {
drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
drvdata->enable_clock = devm_clk_get(dev, NULL);
.enable_time = 3000,
.ramp_delay = 1000,
+ .off_on_delay = 15000,
.owner = THIS_MODULE,
},
"regulator-off-in-suspend"))
suspend_state->enabled = DISABLE_IN_SUSPEND;
- if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-min-microvolt", &pval))
suspend_state->min_uV = pval;
- if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-max-microvolt", &pval))
suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
goto error;
}
- if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
- dev_err(dev,
- "driver callback failed to parse DT for regulator %pOFn\n",
- child);
- goto error;
+ if (desc->of_parse_cb) {
+ int ret;
+
+ ret = desc->of_parse_cb(child, desc, config);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ dev_err(dev,
+ "driver callback failed to parse DT for regulator %pOFn\n",
+ child);
+ goto error;
+ }
}
*node = child;
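
The hunk above splits the of_parse_cb error path so that -EPROBE_DEFER from the
driver callback is propagated to the caller (after dropping the child node
reference) instead of being logged as a hard failure. A minimal userspace
sketch of that split, with hypothetical names and the kernel's deferral errno
value stubbed in::

    #include <stdio.h>

    #define EPROBE_DEFER 517        /* the kernel's deferral errno value */
    #define EINVAL       22

    static int parse_cb(int defer)  /* stand-in for desc->of_parse_cb() */
    {
        return defer ? -EPROBE_DEFER : -EINVAL;
    }

    static int parse_one(int defer)
    {
        int ret = parse_cb(defer);

        if (ret == -EPROBE_DEFER)
            return ret;             /* propagate silently; probe is retried */
        if (ret)
            fprintf(stderr, "callback failed to parse: %d\n", ret);
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", parse_one(1), parse_one(0));
        return 0;
    }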
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
- regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+ ret = regmap_read(pfuze_chip->regmap,
+ desc->vsel_reg, &val);
+ if (ret) {
+ dev_err(&client->dev, "Fails to read from the register.\n");
+ return ret;
+ }
+
if (val & sw_hi) {
if (pfuze_chip->chip_id == PFUZE3000 ||
pfuze_chip->chip_id == PFUZE3001) {
static const struct rpmh_vreg_hw_data pmic5_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
- .n_voltages = 136,
+ .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ .n_voltages = 32,
.pmic_mode_map = pmic_mode_map_pmic5_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
};
while (timeout++ <= abb->settling_time) {
status = ti_abb_check_txdone(abb);
if (status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
status = ti_abb_check_txdone(abb);
if (!status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
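
Both ti_abb wait loops are restructured the same way: success returns directly
from inside the polling loop, so the code after the loop is unconditionally
the timeout path and no post-loop flag or re-test is needed. A userspace
sketch of the idiom, with illustrative names and usleep() in place of
udelay()::

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool txdone(int t) { return t > 20; } /* stand-in for hw status */

    static int wait_txdone(int budget_us)
    {
        for (int waited = 0; waited <= budget_us; waited++) {
            if (txdone(waited))
                return 0;           /* done: return from inside the loop */
            usleep(1);
        }
        fprintf(stderr, "TRANXDONE timeout (%d us)\n", budget_us);
        return -1;                  /* -ETIMEDOUT in the kernel version */
    }

    int main(void) { return wait_txdone(100); }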
/**
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&ap_perms_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
mutex_unlock(&ap_perms_mutex);
if (zcdndev) {
config 53C700_LE_ON_BE
bool
- depends on SCSI_LASI700
+ depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX
scsi_changer *ch = file->private_data;
scsi_device_put(ch->device);
- ch->device = NULL;
file->private_data = NULL;
kref_put(&ch->ref, ch_destroy);
return 0;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
+ bool transitioning_sense = false;
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
goto retry;
}
/*
- * Retry on ALUA state transition or if any
- * UNIT ATTENTION occurred.
+ * If the array returns with 'ALUA state transition'
+ * sense code here, it cannot return RTPG data during
+ * the transition. So set the state to 'transitioning' directly.
*/
if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
- err = SCSI_DH_RETRY;
- else if (sense_hdr.sense_key == UNIT_ATTENTION)
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+ transitioning_sense = true;
+ goto skip_rtpg;
+ }
+ /*
+ * Retry if any other UNIT ATTENTION occurred.
+ */
+ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
off = 8 + (desc[7] * 4);
}
+ skip_rtpg:
spin_lock_irqsave(&pg->lock, flags);
+ if (transitioning_sense)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
return SCSI_MLQUEUE_HOST_BUSY;
}
+ c->device = dev;
+
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_raid_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_direct_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
}
}
-#if defined(BUILD_NVME)
/* Clear NVME stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
}
}
-#endif
/* Clear SCSI stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
return;
}
-#endif
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ if (unlikely(!ha->wq)) {
+ ret = -ENOMEM;
+ goto probe_failed;
+ }
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
const char *buf, size_t count)
{
struct kernfs_node *kn;
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ /*
+ * We need to try to get the module, to avoid the module being
+ * removed while the device is being deleted.
+ */
+ if (scsi_device_get(sdev))
+ return -ENODEV;
kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
WARN_ON_ONCE(!kn);
* state into SDEV_DEL.
*/
device_remove_file(dev, attr);
- scsi_remove_device(to_scsi_device(dev));
+ scsi_remove_device(sdev);
if (kn)
sysfs_unbreak_active_protection(kn);
+ scsi_device_put(sdev);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ if (!hostdata)
return -ENOMEM;
- }
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
/* Set the encryption - we only support wep */
if (is_wep) {
if (sme->key) {
- if (sme->key_idx >= NUM_WEPKEYS) {
- err = -EINVAL;
- goto exit;
- }
+ if (sme->key_idx >= NUM_WEPKEYS)
+ return -EINVAL;
result = prism2_domibset_uint32(wlandev,
DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
unsigned int size;
/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
- }
-
- /*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.
*/
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- struct dev_pm_qos_request qos_req;
+ struct freq_qos_request qos_req;
};
static DEFINE_IDA(cpufreq_ida);
cpufreq_cdev->cpufreq_state = state;
- return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+ return freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency);
}
cooling_ops = &cpufreq_cooling_ops;
}
- ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- cpufreq_cdev->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints,
+ &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+ cpufreq_cdev->freq_table[0].frequency);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
return cdev;
remove_qos_req:
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
mutex_unlock(&cooling_list_lock);
thermal_cooling_device_unregister(cdev);
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
{
struct serial_8250_men_mcb_data *data;
struct resource *mem;
- unsigned int num_ports;
- unsigned int i;
+ int num_ports;
+ int i;
void __iomem *membase;
mem = mcb_get_resource(mdev, IORESOURCE_MEM);
dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
mdev->id, num_ports);
- if (num_ports == 0 || num_ports > 4) {
+ if (num_ports <= 0 || num_ports > 4) {
dev_err(&mdev->dev, "unexpected number of ports: %u\n",
num_ports);
return -ENODEV;
static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
{
- unsigned int num_ports, i;
+ int num_ports, i;
struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
if (!data)
goto err;
switch (cdns->dr_mode) {
- case USB_DR_MODE_UNKNOWN:
case USB_DR_MODE_OTG:
ret = cdns3_hw_role_switch(cdns);
if (ret)
if (ret)
goto err;
break;
+ default:
+ ret = -EINVAL;
+ goto err;
}
return ret;
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_req = to_cdns3_request(request);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+ /* Request was dequeued and TRB was changed to TRB_LINK. */
+ if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+ trace_cdns3_complete_trb(priv_ep, trb);
+ cdns3_move_deq_to_next_trb(priv_req);
+ }
+
/* Re-select endpoint. It could be changed by other CPU during
* handling usb_gadget_giveback_request.
*/
struct usb_request *req, *req_temp;
struct cdns3_request *priv_req;
struct cdns3_trb *link_trb;
+ u8 req_on_hw_ring = 0;
unsigned long flags;
int ret = 0;
list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
list) {
- if (request == req)
+ if (request == req) {
+ req_on_hw_ring = 1;
goto found;
+ }
}
list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
goto not_found;
found:
-
- if (priv_ep->wa1_trb == priv_req->trb)
- cdns3_wa1_restore_cycle_bit(priv_ep);
-
link_trb = priv_req->trb;
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
-
- /* Update ring */
- request = cdns3_next_request(&priv_ep->deferred_req_list);
- if (request) {
- priv_req = to_cdns3_request(request);
+ /* Update ring only if the removed request is on pending_req_list */
+ if (req_on_hw_ring) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
(priv_req->start_trb * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
- TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
- } else {
- priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+ if (priv_ep->wa1_trb == priv_req->trb)
+ cdns3_wa1_restore_cycle_bit(priv_ep);
}
+ cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
not_found:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
kfree(usblp->readbuf);
kfree(usblp->device_id_string);
kfree(usblp->statusbuf);
+ usb_put_intf(usblp->intf);
kfree(usblp);
}
init_waitqueue_head(&usblp->wwait);
init_usb_anchor(&usblp->urbs);
usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- usblp->intf = intf;
+ usblp->intf = usb_get_intf(intf);
/* Malloc device ID string buffer to the largest expected length,
* since we can re-query it on an ioctl and a dynamic string
kfree(usblp->readbuf);
kfree(usblp->statusbuf);
kfree(usblp->device_id_string);
+ usb_put_intf(usblp->intf);
kfree(usblp);
abort_ret:
return retval;
tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
bl = bytes - n;
- if (bl > 3)
- bl = 3;
+ if (bl > 4)
+ bl = 4;
for (i = 0; i < bl; i++)
- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
}
break;
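
The two fixes above are independent: a 32-bit FIFO word carries up to four
bytes, not three, and byte i of the word lives at bit position i*8 (the old
code shifted by the running buffer offset n instead). A standalone
demonstration of the corrected unpacking::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tmp = 0x44332211;  /* one 32-bit FIFO word */
        uint8_t data[4];

        for (int i = 0; i < 4; i++) /* four bytes per word, not three */
            data[i] = (uint8_t)((tmp >> (i * 8)) & 0xFF);

        printf("%02x %02x %02x %02x\n", data[0], data[1], data[2], data[3]);
        return 0;                   /* prints: 11 22 33 44 */
    }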
goto exit;
}
- if (mutex_lock_interruptible(&dev->mutex)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
+ mutex_lock(&dev->mutex);
if (dev->open_count != 1) {
retval = -ENODEV;
/* wait for data */
spin_lock_irq(&dev->rbsl);
- if (dev->ring_head == dev->ring_tail) {
+ while (dev->ring_head == dev->ring_tail) {
dev->interrupt_in_done = 0;
spin_unlock_irq(&dev->rbsl);
if (file->f_flags & O_NONBLOCK) {
retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
if (retval < 0)
goto unlock_exit;
- } else {
- spin_unlock_irq(&dev->rbsl);
+
+ spin_lock_irq(&dev->rbsl);
}
+ spin_unlock_irq(&dev->rbsl);
/* actual_buffer contains actual_length + interrupt_in_buffer */
actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+ retval = -EIO;
+ goto unlock_exit;
+ }
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
- dev->ring_buffer =
- kmalloc_array(ring_buffer_size,
- sizeof(size_t) + dev->interrupt_in_endpoint_size,
- GFP_KERNEL);
+ dev->ring_buffer = kcalloc(ring_buffer_size,
+ sizeof(size_t) + dev->interrupt_in_endpoint_size,
+ GFP_KERNEL);
if (!dev->ring_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
goto exit;
}
- if (mutex_lock_interruptible(&dev->lock)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
+ mutex_lock(&dev->lock);
if (dev->open_count != 1) {
dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
get_version_reply,
sizeof(*get_version_reply),
1000);
- if (result < sizeof(*get_version_reply)) {
+ if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
dev_err(idev, "get version request failed: %d\n", result);
struct ti_port *tport;
int port_number;
int status;
- int do_unlock;
unsigned long flags;
tdev = usb_get_serial_data(port->serial);
"%s - cannot send close port command, %d\n"
, __func__, status);
- /* if mutex_lock is interrupted, continue anyway */
- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
- --tport->tp_tdev->td_open_port_count;
- if (tport->tp_tdev->td_open_port_count <= 0) {
+ mutex_lock(&tdev->td_open_close_lock);
+ --tdev->td_open_port_count;
+ if (tdev->td_open_port_count == 0) {
/* last port is closed, shut down interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
- tport->tp_tdev->td_open_port_count = 0;
}
- if (do_unlock)
- mutex_unlock(&tdev->td_open_close_lock);
+ mutex_unlock(&tdev->td_open_close_lock);
}
struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
- phys_addr_t resv_msi_base;
+ phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
return 0;
}
+static inline int kern_xfer(void *dst, void *src, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
/**
* vringh_init_kern - initialize a vringh for a kernelspace vring.
* @vrh: the vringh to initialize.
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+ return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
* counter first before updating event flags.
*/
virtio_wmb(vq->weak_barriers);
- } else {
- used_idx = vq->last_used_idx;
- wrap_counter = vq->packed.used_wrap_counter;
}
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
*/
virtio_mb(vq->weak_barriers);
- if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+ if (is_used_desc_packed(vq,
+ vq->last_used_idx,
+ vq->packed.used_wrap_counter)) {
END_USE(vq);
return false;
}
btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
cache->key.objectid);
+ btrfs_put_block_group(cache);
ret = -EINVAL;
goto error;
}
struct btrfs_workqueue *fixup_workers;
struct btrfs_workqueue *delayed_workers;
- /* the extent workers do delayed refs on the extent allocation tree */
- struct btrfs_workqueue *extent_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
u32 thread_pool_size;
int nitems, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
out_qgroup:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
out_fail:
- btrfs_inode_rsv_release(inode, true);
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
return ret;
* btrfs_delalloc_release_extents - release our outstanding_extents
* @inode: the inode to balance the reservation for.
* @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
*
* When we reserve space we increase outstanding_extents for the extents we may
* add. Once we've set the range as delalloc or created our ordered extents we
* temporarily tracked outstanding_extents. This _must_ be used in conjunction
* with btrfs_delalloc_reserve_metadata.
*/
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned num_extents;
if (btrfs_is_testing(fs_info))
return;
- btrfs_inode_rsv_release(inode, qgroup_free);
+ btrfs_inode_rsv_release(inode, true);
}
/**
btrfs_destroy_workqueue(fs_info->readahead_workers);
btrfs_destroy_workqueue(fs_info->flush_workers);
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
- btrfs_destroy_workqueue(fs_info->extent_workers);
/*
* Now that all other work queues are destroyed, we can safely destroy
* the queues used for metadata I/O, since tasks from those other work
max_active, 2);
fs_info->qgroup_rescan_workers =
btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
- fs_info->extent_workers =
- btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 8);
if (!(fs_info->workers && fs_info->delalloc_workers &&
fs_info->submit_workers && fs_info->flush_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->readahead_workers &&
fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->extent_workers &&
fs_info->qgroup_rescan_workers)) {
return -ENOMEM;
}
force_page_uptodate);
if (ret) {
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
break;
}
if (extents_locked == -EAGAIN)
goto again;
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
ret = extents_locked;
break;
}
else
free_extent_state(cached_state);
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
- true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
break;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- u64 len;
- /*
- * If the inode needs a full sync, make sure we use a full range to
- * avoid log tree corruption, due to hole detection racing with ordered
- * extent completion for adjacent ranges, and assertion failures during
- * hole detection.
- */
- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags)) {
- start = 0;
- end = LLONG_MAX;
- }
-
- /*
- * The range length can be represented by u64, we have to do the typecasts
- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
- */
- len = (u64)end - (u64)start + 1;
trace_btrfs_sync_file(file, datasync);
btrfs_init_log_ctx(&ctx, inode);
atomic_inc(&root->log_batch);
/*
+ * If the inode needs a full sync, make sure we use a full range to
+ * avoid log tree corruption, due to hole detection racing with ordered
+ * extent completion for adjacent ranges, and assertion failures during
+ * hole detection. Do this while holding the inode lock, to avoid races
+ * with other tasks.
+ */
+ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
+ /*
* Before we acquired the inode's lock, someone may have dirtied more
* pages in the target range. We need to make sure that writeback for
* any such pages does not start while we are logging the inode, because
/*
* We have to do this here to avoid the priority inversion of waiting on
* IO of a lower priority task while holding a transaction open.
+ *
+ * Also, the range length can be represented by u64, we have to do the
+ * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
*/
- ret = btrfs_wait_ordered_range(inode, start, len);
+ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
if (ret) {
up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
goto out_put;
}
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
iput(inode);
out_release:
ClearPageChecked(page);
set_page_dirty(page);
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);
if (!page) {
btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
ret = -ENOMEM;
goto out;
}
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, block_start,
blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
unlock_page(page);
put_page(page);
out:
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode, data_reserved,
offset, count - (size_t)ret, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), count);
}
out:
if (wakeup)
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
if (!ret2) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
out_unlock:
unlock_page(page);
out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
btrfs_delalloc_release_space(inode, data_reserved, page_start,
reserved_space, (ret != 0));
out_noreserve:
unlock_page(pages[i]);
put_page(pages[i]);
}
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
out:
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret;
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+ trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
ret = qgroup_reserve(root, num_bytes, enforce, type);
if (ret < 0)
return ret;
*/
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+ trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
num_bytes, type);
}
if (!page) {
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ PAGE_SIZE);
ret = -ENOMEM;
goto out;
}
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
ret = -EIO;
goto out;
}
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
clear_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end,
put_page(page);
index++;
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
}
else
sb->s_maxbytes = MAX_NON_LFS;
- /* Some very old servers like DOS and OS/2 used 2 second granularity */
+ /*
+ * Some very old servers like DOS and OS/2 used 2 second granularity
+ * (while all current servers use 100ns granularity - see MS-DTYP)
+ * but 1 second is the maximum allowed granularity for the VFS
+ * so for old servers set time granularity to 1 second while for
+ * everything else (current servers) set it to 100ns.
+ */
if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
((tcon->ses->capabilities &
tcon->ses->server->vals->cap_nt_find) == 0) &&
struct cifsInodeInfo {
bool can_cache_brlcks;
struct list_head llist; /* locks held by this inode */
+ /*
+ * NOTE: Some code paths call down_read(lock_sem) twice, so
+ * we must always use cifs_down_write() instead of down_write()
+ * for this semaphore to avoid deadlocks.
+ */
struct rw_semaphore lock_sem; /* protect the fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
struct file_lock *flock, const unsigned int xid);
extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+extern void cifs_down_write(struct rw_semaphore *sem);
extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
struct file *file,
struct tcon_link *tlink,
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ kref_get(&mid_entry->refcount);
if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
mid_entry->mid_state = MID_RETRY_NEEDED;
list_move(&mid_entry->qhead, &retry_list);
+ mid_entry->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
mutex_unlock(&server->srv_mutex);
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
+ cifs_mid_q_entry_release(mid_entry);
}
if (cifs_rdma_enabled(server)) {
if (mid->mid_flags & MID_DELETED)
printk_once(KERN_WARNING
"trying to dequeue a deleted mid\n");
- else
+ else {
list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
spin_unlock(&GlobalMid_Lock);
}
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+ kref_get(&mid_entry->refcount);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
+ mid_entry->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
+ cifs_mid_q_entry_release(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
rc = socket->ops->connect(socket, saddr, slen,
server->noblockcnt ? O_NONBLOCK : 0);
-
- if (rc == -EINPROGRESS)
+ /*
+ * When mounting SMB root file systems, we do not want to block in
+ * connect. Otherwise bail out and then let cifs_reconnect() perform
+ * reconnect failover - if possible.
+ */
+ if (server->noblockcnt && rc == -EINPROGRESS)
rc = 0;
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
return has_locks;
}
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+ while (!down_write_trylock(sem))
+ msleep(10);
+}
+
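
cifs_down_write() exists because some CIFS paths take lock_sem for reading
twice on the same thread; a writer sleeping in down_write() would sit in the
rwsem wait queue and deadlock the nested down_read(). Spinning on
down_write_trylock() keeps the writer out of that queue. A userspace analogue
with POSIX rwlocks (illustrative only; real rwsem and rwlock fairness details
differ)::

    #include <pthread.h>
    #include <unistd.h>

    static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

    static void down_write_polled(pthread_rwlock_t *l)
    {
        while (pthread_rwlock_trywrlock(l) != 0)
            usleep(10000);          /* msleep(10) in the kernel version */
    }

    int main(void)
    {
        pthread_rwlock_rdlock(&sem);
        pthread_rwlock_rdlock(&sem);   /* nested read: safe vs polled writer */
        pthread_rwlock_unlock(&sem);
        pthread_rwlock_unlock(&sem);

        down_write_polled(&sem);
        pthread_rwlock_unlock(&sem);
        return 0;
    }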
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
INIT_LIST_HEAD(&fdlocks->locks);
fdlocks->cfile = cfile;
cfile->llist = fdlocks;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_add(&fdlocks->llist, &cinode->llist);
up_write(&cinode->lock_sem);
bool oplock_break_cancelled;
spin_lock(&tcon->open_file_lock);
-
+ spin_lock(&cifsi->open_file_lock);
spin_lock(&cifs_file->file_info_lock);
if (--cifs_file->count > 0) {
spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
return;
}
cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
/* remove it from the lists */
- spin_lock(&cifsi->open_file_lock);
list_del(&cifs_file->flist);
- spin_unlock(&cifsi->open_file_lock);
list_del(&cifs_file->tlist);
atomic_dec(&tcon->num_local_opens);
cifs_set_oplock_level(cifsi, 0);
}
+ spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
oplock_break_cancelled = wait_oplock_handler ?
* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway.
*/
- down_write(&cifsi->lock_sem);
+ cifs_down_write(&cifsi->lock_sem);
list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
}
try_again:
exist = false;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
lock->type, lock->flags, &conf_lock,
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_del_init(&lock->blist);
}
return rc;
try_again:
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
int rc = 0;
/* we are going to update can_cache_brlcks here - need a write access */
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
if (!buf)
return -ENOMEM;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
cifsFileInfo_put(wfile);
if (rc)
- return rc;
+ goto cifs_setattr_exit;
} else if (rc != -EBADF)
- return rc;
+ goto cifs_setattr_exit;
else
rc = 0;
}
/* we do not want to loop forever */
last_mid = cur_mid;
cur_mid++;
+ /* avoid 0xFFFF MID */
+ if (cur_mid == 0xffff)
+ cur_mid++;
/*
* This nested loop looks more expensive than it is.
cur = buf;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
- struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
- refcount);
-
- mempool_free(mid, cifs_mid_poolp);
-}
-
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
-{
- spin_lock(&GlobalMid_Lock);
- kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
- spin_unlock(&GlobalMid_Lock);
-}
-
-void
-DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
+ struct mid_q_entry *midEntry =
+ container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
__le16 command = midEntry->server->vals->lock_cmd;
__u16 smb_cmd = le16_to_cpu(midEntry->command);
}
}
#endif
+
+ mempool_free(midEntry, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+ spin_lock(&GlobalMid_Lock);
+ kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+ spin_unlock(&GlobalMid_Lock);
+}
+
+void DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
cifs_mid_q_entry_release(midEntry);
}
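
The refactor above folds DeleteMidQEntry's bookkeeping into the kref release
function and keeps the final kref_put() under GlobalMid_Lock, so list walkers
that kref_get() under the same lock can never see a mid whose refcount has
already hit zero. The pattern in plain C, with a pthread mutex standing in
for GlobalMid_Lock and hypothetical names::

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

    struct mid { int refcount; };

    static void mid_release(struct mid *m)
    {
        /* stats/bookkeeping would happen here, then the free */
        free(m);
    }

    static void mid_put(struct mid *m)
    {
        /* final put runs with the same lock the list walkers hold */
        pthread_mutex_lock(&global_lock);
        if (--m->refcount == 0)
            mid_release(m);
        pthread_mutex_unlock(&global_lock);
    }

    int main(void)
    {
        struct mid *m = malloc(sizeof(*m));

        m->refcount = 2;    /* one for the list, one for the caller */
        mid_put(m);
        mid_put(m);
        return 0;
    }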
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
- list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
+ if (!(mid->mid_flags & MID_DELETED)) {
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
rc = -EHOSTDOWN;
break;
default:
- list_del_init(&mid->qhead);
+ if (!(mid->mid_flags & MID_DELETED)) {
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
for (;;) {
entry = xas_find_conflict(xas);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+ return entry;
if (dax_entry_order(entry) < order)
return XA_RETRY_ENTRY;
- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
- !dax_is_locked(entry))
+ if (!dax_is_locked(entry))
return entry;
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
-obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
- bool async = req->args->end;
+ bool async;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+
+ async = req->args->end;
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below intr_entry check. Pairs with
else
fuse_invalidate_entry_cache(entry);
- fuse_advise_use_readdirplus(dir);
+ if (inode)
+ fuse_advise_use_readdirplus(dir);
return newent;
out_iput:
is_truncate = true;
}
+ /* Flush dirty data/metadata before non-truncate SETATTR */
+ if (is_wb && S_ISREG(inode->i_mode) &&
+ attr->ia_valid &
+ (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+ ATTR_TIMES_SET)) {
+ err = write_inode_now(inode, true);
+ if (err)
+ return err;
+
+ fuse_set_nowrite(inode);
+ fuse_release_nowrite(inode);
+ }
+
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
- bool lock_inode = (file->f_flags & O_TRUNC) &&
+ bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
if (err)
return err;
- if (lock_inode)
+ if (is_wb_truncate) {
inode_lock(inode);
+ fuse_set_nowrite(inode);
+ }
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
- if (lock_inode)
+ if (is_wb_truncate) {
+ fuse_release_nowrite(inode);
inode_unlock(inode);
+ }
return err;
}
if (!data->ff) {
err = -EIO;
- data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+ data->ff = fuse_write_file_get(fc, fi);
if (!data->ff)
goto out_unlock;
}
* under writeback, so we can release the page lock.
*/
if (data->wpa == NULL) {
- struct fuse_inode *fi = get_fuse_inode(inode);
-
err = -ENOMEM;
wpa = fuse_writepage_args_alloc();
if (!wpa) {
bool destroy:1;
bool no_control:1;
bool no_force_umount:1;
+ bool no_mount_options:1;
unsigned int max_read;
unsigned int blksize;
const char *subtype;
/** Do not allow MNT_FORCE umount */
unsigned int no_force_umount:1;
+ /* Do not show mount options */
+ unsigned int no_mount_options:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
struct super_block *sb = root->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
+ if (fc->no_mount_options)
+ return 0;
+
seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
if (fc->default_permissions)
fc->destroy = ctx->destroy;
fc->no_control = ctx->no_control;
fc->no_force_umount = ctx->no_force_umount;
+ fc->no_mount_options = ctx->no_mount_options;
err = -ENOMEM;
root = fuse_get_root_inode(sb, ctx->rootmode);
struct virtqueue *vq; /* protected by ->lock */
struct work_struct done_work;
struct list_head queued_reqs;
+ struct list_head end_reqs; /* End these requests */
struct delayed_work dispatch_work;
struct fuse_dev *fud;
bool connected;
struct list_head list;
};
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+ struct fuse_req *req, bool in_flight);
+
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
struct virtio_fs *fs = vq->vdev->priv;
return &vq_to_fsvq(vq)->fud->pq;
}
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+ fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+ WARN_ON(fsvq->in_flight <= 0);
+ fsvq->in_flight--;
+}
+
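
Wrapping the in-flight counter in inc_in_flight_req()/dec_in_flight_req()
puts the underflow check (the WARN_ON) in exactly one place instead of at
every call site. A trivial sketch of the same wrapping, with assert() in
place of WARN_ON()::

    #include <assert.h>

    struct fsvq { int in_flight; /* protected by a lock in the original */ };

    static void inc_in_flight(struct fsvq *q) { q->in_flight++; }

    static void dec_in_flight(struct fsvq *q)
    {
        assert(q->in_flight > 0);   /* catches unbalanced accounting */
        q->in_flight--;
    }

    int main(void)
    {
        struct fsvq q = { 0 };

        inc_in_flight(&q);
        dec_in_flight(&q);
        return 0;
    }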
static void release_virtio_fs_obj(struct kref *ref)
{
struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
flush_delayed_work(&fsvq->dispatch_work);
}
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
- struct virtio_fs_forget *forget;
-
- spin_lock(&fsvq->lock);
- while (1) {
- forget = list_first_entry_or_null(&fsvq->queued_reqs,
- struct virtio_fs_forget, list);
- if (!forget)
- break;
- list_del(&forget->list);
- kfree(forget);
- }
- spin_unlock(&fsvq->lock);
-}
-
static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
struct virtio_fs_vq *fsvq;
for (i = 0; i < fs->nvqs; i++) {
fsvq = &fs->vqs[i];
- if (i == VQ_HIPRIO)
- drain_hiprio_queued_reqs(fsvq);
-
virtio_fs_drain_queue(fsvq);
}
}
while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
kfree(req);
- fsvq->in_flight--;
+ dec_in_flight_req(fsvq);
}
} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
spin_unlock(&fsvq->lock);
}
-static void virtio_fs_dummy_dispatch_work(struct work_struct *work)
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
+ struct fuse_req *req;
+ struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+ dispatch_work.work);
+ struct fuse_conn *fc = fsvq->fud->fc;
+ int ret;
+
+ pr_debug("virtio-fs: worker %s called.\n", __func__);
+ while (1) {
+ spin_lock(&fsvq->lock);
+ req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+ list);
+ if (!req) {
+ spin_unlock(&fsvq->lock);
+ break;
+ }
+
+ list_del_init(&req->list);
+ spin_unlock(&fsvq->lock);
+ fuse_request_end(fc, req);
+ }
+
+ /* Dispatch pending requests */
+ while (1) {
+ spin_lock(&fsvq->lock);
+ req = list_first_entry_or_null(&fsvq->queued_reqs,
+ struct fuse_req, list);
+ if (!req) {
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+ list_del_init(&req->list);
+ spin_unlock(&fsvq->lock);
+
+ ret = virtio_fs_enqueue_req(fsvq, req, true);
+ if (ret < 0) {
+ if (ret == -ENOMEM || ret == -ENOSPC) {
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->queued_reqs);
+ schedule_delayed_work(&fsvq->dispatch_work,
+ msecs_to_jiffies(1));
+ spin_unlock(&fsvq->lock);
+ return;
+ }
+ req->out.h.error = ret;
+ spin_lock(&fsvq->lock);
+ dec_in_flight_req(fsvq);
+ spin_unlock(&fsvq->lock);
+ pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+ ret);
+ fuse_request_end(fc, req);
+ }
+ }
}
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
list_del(&forget->list);
if (!fsvq->connected) {
+ dec_in_flight_req(fsvq);
spin_unlock(&fsvq->lock);
kfree(forget);
continue;
} else {
pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
ret);
+ dec_in_flight_req(fsvq);
kfree(forget);
}
spin_unlock(&fsvq->lock);
return;
}
- fsvq->in_flight++;
notify = virtqueue_kick_prepare(vq);
spin_unlock(&fsvq->lock);
fuse_request_end(fc, req);
spin_lock(&fsvq->lock);
- fsvq->in_flight--;
+ dec_in_flight_req(fsvq);
spin_unlock(&fsvq->lock);
}
}
names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+ INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
virtio_fs_hiprio_dispatch_work);
spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
spin_lock_init(&fs->vqs[i].lock);
INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
- virtio_fs_dummy_dispatch_work);
+ virtio_fs_request_dispatch_work);
INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+ INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
"requests.%u", i - VQ_REQUEST);
callbacks[i] = virtio_fs_vq_done;
list_add_tail(&forget->list, &fsvq->queued_reqs);
schedule_delayed_work(&fsvq->dispatch_work,
msecs_to_jiffies(1));
+ inc_in_flight_req(fsvq);
} else {
pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
ret);
goto out;
}
- fsvq->in_flight++;
+ inc_in_flight_req(fsvq);
notify = virtqueue_kick_prepare(vq);
spin_unlock(&fsvq->lock);
/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
- struct fuse_req *req)
+ struct fuse_req *req, bool in_flight)
{
/* requests need at least 4 elements */
struct scatterlist *stack_sgs[6];
unsigned int i;
int ret;
bool notify;
+ struct fuse_pqueue *fpq;
/* Does the sglist fit on the stack? */
total_sgs = sg_count_fuse_req(req);
goto out;
}
- fsvq->in_flight++;
+ /* Request successfully sent. */
+ fpq = &fsvq->fud->pq;
+ spin_lock(&fpq->lock);
+ list_add_tail(&req->list, fpq->processing);
+ spin_unlock(&fpq->lock);
+ set_bit(FR_SENT, &req->flags);
+ /* matches barrier in request_wait_answer() */
+ smp_mb__after_atomic();
+
+ if (!in_flight)
+ inc_in_flight_req(fsvq);
notify = virtqueue_kick_prepare(vq);
spin_unlock(&fsvq->lock);
{
unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
struct virtio_fs *fs;
- struct fuse_conn *fc;
struct fuse_req *req;
- struct fuse_pqueue *fpq;
+ struct virtio_fs_vq *fsvq;
int ret;
WARN_ON(list_empty(&fiq->pending));
spin_unlock(&fiq->lock);
fs = fiq->priv;
- fc = fs->vqs[queue_id].fud->fc;
pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
__func__, req->in.h.opcode, req->in.h.unique,
req->in.h.nodeid, req->in.h.len,
fuse_len_args(req->args->out_numargs, req->args->out_args));
- fpq = &fs->vqs[queue_id].fud->pq;
- spin_lock(&fpq->lock);
- if (!fpq->connected) {
- spin_unlock(&fpq->lock);
- req->out.h.error = -ENODEV;
- pr_err("virtio-fs: %s disconnected\n", __func__);
- fuse_request_end(fc, req);
- return;
- }
- list_add_tail(&req->list, fpq->processing);
- spin_unlock(&fpq->lock);
- set_bit(FR_SENT, &req->flags);
- /* matches barrier in request_wait_answer() */
- smp_mb__after_atomic();
-
-retry:
- ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req);
+ fsvq = &fs->vqs[queue_id];
+ ret = virtio_fs_enqueue_req(fsvq, req, false);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
- /* Virtqueue full. Retry submission */
- /* TODO use completion instead of timeout */
- usleep_range(20, 30);
- goto retry;
+ /*
+ * Virtqueue full. Retry submission from worker
+ * context as we might be holding fc->bg_lock.
+ */
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->queued_reqs);
+ inc_in_flight_req(fsvq);
+ schedule_delayed_work(&fsvq->dispatch_work,
+ msecs_to_jiffies(1));
+ spin_unlock(&fsvq->lock);
+ return;
}
req->out.h.error = ret;
pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
- spin_lock(&fpq->lock);
- clear_bit(FR_SENT, &req->flags);
- list_del_init(&req->list);
- spin_unlock(&fpq->lock);
- fuse_request_end(fc, req);
+
+ /* Can't end request in submission context. Use a worker */
+ spin_lock(&fsvq->lock);
+ list_add_tail(&req->list, &fsvq->end_reqs);
+ schedule_delayed_work(&fsvq->dispatch_work, 0);
+ spin_unlock(&fsvq->lock);
return;
}
}
.destroy = true,
.no_control = true,
.no_force_umount = true,
+ .no_mount_options = true,
};
mutex_lock(&virtio_fs_mutex);
{
struct gfs2_args *args;
- args = kzalloc(sizeof(*args), GFP_KERNEL);
+ args = kmalloc(sizeof(*args), GFP_KERNEL);
if (args == NULL)
return -ENOMEM;
- args->ar_quota = GFS2_QUOTA_DEFAULT;
- args->ar_data = GFS2_DATA_DEFAULT;
- args->ar_commit = 30;
- args->ar_statfs_quantum = 30;
- args->ar_quota_quantum = 60;
- args->ar_errors = GFS2_ERRORS_DEFAULT;
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
+ *args = sdp->sd_args;
+ } else {
+ memset(args, 0, sizeof(*args));
+ args->ar_quota = GFS2_QUOTA_DEFAULT;
+ args->ar_data = GFS2_DATA_DEFAULT;
+ args->ar_commit = 30;
+ args->ar_statfs_quantum = 30;
+ args->ar_quota_quantum = 60;
+ args->ar_errors = GFS2_ERRORS_DEFAULT;
+ }
fc->fs_private = args;
fc->ops = &gfs2_context_ops;
return 0;
}
static const struct fs_context_operations gfs2_meta_context_ops = {
+ .free = gfs2_fc_free,
.get_tree = gfs2_meta_get_tree,
};
unsigned sq_entries;
unsigned sq_mask;
unsigned sq_thread_idle;
+ unsigned cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct {
unsigned cached_cq_tail;
+ atomic_t cached_cq_overflow;
unsigned cq_entries;
unsigned cq_mask;
struct wait_queue_head cq_wait;
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
- return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ + atomic_read(&ctx->cached_cq_overflow);
}
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
} else {
- unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
-
- WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
}
}
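
Keeping cached_cq_overflow as a kernel-private atomic and publishing it with
WRITE_ONCE makes the increment race-free even when two completions overflow
at once; the shared ring field becomes a plain published snapshot rather
than a racy load-modify-store target. A C11 sketch of the same split
(illustrative, not the io_uring layout)::

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint cached_overflow;   /* kernel-private counter   */
    static atomic_uint shared_overflow;   /* userspace-visible field  */

    static void note_overflow(void)
    {
        /* atomic increment first, then publish the snapshot
         * (WRITE_ONCE analogue: a relaxed store) */
        unsigned v = atomic_fetch_add(&cached_overflow, 1) + 1;

        atomic_store_explicit(&shared_overflow, v, memory_order_relaxed);
    }

    int main(void)
    {
        note_overflow();
        note_overflow();
        printf("overflow=%u\n",
               atomic_load_explicit(&shared_overflow,
                                    memory_order_relaxed));  /* 2 */
        return 0;
    }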
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+ struct io_rings *rings = ctx->rings;
+
+ /* make sure SQ entry isn't read before tail */
+ return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
+
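
io_sqring_entries() pairs an acquire load of the SQ tail with the producer's
release store: any SQE written before the tail was published is guaranteed
visible once the consumer observes the new tail. A single-producer,
single-consumer toy ring in C11 atomics showing the same pairing
(illustrative, not the io_uring layout)::

    #include <stdatomic.h>
    #include <stdio.h>

    #define RING 8
    static int slots[RING];
    static atomic_uint tail;     /* producer-owned */
    static unsigned head;        /* consumer-owned (ctx->cached_sq_head) */

    static void produce(int v)
    {
        unsigned t = atomic_load_explicit(&tail, memory_order_relaxed);

        slots[t % RING] = v;
        atomic_store_explicit(&tail, t + 1, memory_order_release);
    }

    static unsigned entries(void)
    {
        /* "make sure SQ entry isn't read before tail" */
        return atomic_load_explicit(&tail, memory_order_acquire) - head;
    }

    int main(void)
    {
        produce(42);
        if (entries())
            printf("sqe: %d\n", slots[head++ % RING]);
        return 0;
    }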
/*
* Find and free completed poll iocbs
*/
mutex_unlock(&ctx->uring_lock);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
- int iters, ret = 0;
+ int iters = 0, ret = 0;
- /*
- * We disallow the app entering submit/complete with polling, but we
- * still need to lock the ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
-
- iters = 0;
do {
int tmin = 0;
ret = 0;
} while (min && !*nr_events && !need_resched());
+ return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
+{
+ int ret;
+
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_iopoll_check(ctx, nr_events, min);
mutex_unlock(&ctx->uring_lock);
return ret;
}
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
+ req->result = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_ring_ctx *ctx;
- struct io_kiocb *req;
+ struct io_kiocb *req, *prev;
unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer);
atomic_inc(&ctx->cq_timeouts);
spin_lock_irqsave(&ctx->completion_lock, flags);
+ /*
+ * Adjust the reqs sequence before the current one because it
+ * will consume a slot in the cq_ring and the cq_tail pointer
+ * will be increased; otherwise other timeout reqs may return in
+ * advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
list_del(&req->list);
io_cqring_fill_event(ctx, req->user_data, -ETIME);
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
struct timespec64 ts;
+ unsigned span = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (ctx->cached_sq_head < nxt_sq_head)
tmp += UINT_MAX;
- if (tmp >= tmp_nxt)
+ if (tmp > tmp_nxt)
break;
+
+ /*
+ * The sequence of each req after the inserted one, and of the
+ * inserted req itself, must be adjusted because each timeout
+ * req consumes a slot.
+ */
+ span++;
+ nxt->sequence++;
}
+ req->sequence -= span;
list_add(&req->list, entry);
spin_unlock_irq(&ctx->completion_lock);
}
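
A small worked example of the span accounting above: if queued timeouts hold
sequences 10..13 and a new one sorts in front of the last two, those two each
slip one cq_ring slot (sequence++) and the new request backs off by span = 2,
keeping the relative expiry order intact. Toy numbers only; the real list
walk and wrap-around comparison are more involved::

    #include <stdio.h>

    int main(void)
    {
        unsigned seq[4] = { 10, 11, 12, 13 }; /* pending timeout sequences */
        unsigned new_seq = 12;                /* request being inserted    */
        unsigned span = 0;

        /* every timeout the new request is inserted in front of slips
         * one cq_ring slot; the new request backs off by that count */
        for (int i = 2; i < 4; i++) {
            seq[i]++;
            span++;
        }
        new_seq -= span;                      /* req->sequence -= span */

        printf("new=%u, bumped: %u %u, span=%u\n",
               new_seq, seq[2], seq[3], span);
        return 0;
    }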
static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, bool force_nonblock)
+ struct sqe_submit *s)
{
int ret;
- ret = __io_submit_sqe(ctx, req, s, force_nonblock);
+ ret = __io_submit_sqe(ctx, req, s, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
}
static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, bool force_nonblock)
+ struct sqe_submit *s)
{
int ret;
return 0;
}
- return __io_queue_sqe(ctx, req, s, force_nonblock);
+ return __io_queue_sqe(ctx, req, s);
}
static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_kiocb *shadow,
- bool force_nonblock)
+ struct sqe_submit *s, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
if (!shadow)
- return io_queue_sqe(ctx, req, s, force_nonblock);
+ return io_queue_sqe(ctx, req, s);
/*
* Mark the first IO in link list as DRAIN, let all the following
if (ret) {
if (ret != -EIOCBQUEUED) {
io_free_req(req);
+ __io_free_req(shadow);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0;
}
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req, s, force_nonblock);
+ return __io_queue_sqe(ctx, req, s);
return 0;
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link,
- bool force_nonblock)
+ struct io_submit_state *state, struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
struct io_kiocb *req;
return;
}
+ req->user_data = s->sqe->user_data;
+
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
- io_queue_sqe(ctx, req, s, force_nonblock);
+ io_queue_sqe(ctx, req, s);
}
}
/* drop invalid entries */
ctx->cached_sq_head++;
- rings->sq_dropped++;
+ ctx->cached_sq_dropped++;
+ WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
return false;
}
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
- unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+ bool has_user, bool mm_fault)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
}
for (i = 0; i < nr; i++) {
+ struct sqe_submit s;
+
+ if (!io_get_sqring(ctx, &s))
+ break;
+
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- true);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
- prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+ prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
- if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+ if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req))
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
- shadow_req->sequence = sqes[i].sequence;
+ shadow_req->sequence = s.sequence;
}
out:
if (unlikely(mm_fault)) {
- io_cqring_add_event(ctx, sqes[i].sqe->user_data,
+ io_cqring_add_event(ctx, s.sqe->user_data,
-EFAULT);
} else {
- sqes[i].has_user = has_user;
- sqes[i].needs_lock = true;
- sqes[i].needs_fixed_file = true;
- io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+ s.has_user = has_user;
+ s.needs_lock = true;
+ s.needs_fixed_file = true;
+ io_submit_sqe(ctx, &s, statep, &link);
submitted++;
}
}
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(&state);
static int io_sq_thread(void *data)
{
- struct sqe_submit sqes[IO_IOPOLL_BATCH];
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
mm_segment_t old_fs;
timeout = inflight = 0;
while (!kthread_should_park()) {
- bool all_fixed, mm_fault = false;
- int i;
+ bool mm_fault = false;
+ unsigned int to_submit;
if (inflight) {
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
- io_iopoll_check(ctx, &nr_events, 0);
+ /*
+ * inflight is the count of the maximum possible
+ * entries we submitted, but it can be smaller
+ * if we dropped some of them. If we don't have
+ * poll entries available, then we know that we
+ * have nothing left to poll for. Reset the
+ * inflight count to zero in that case.
+ */
+ mutex_lock(&ctx->uring_lock);
+ if (!list_empty(&ctx->poll_list))
+ __io_iopoll_check(ctx, &nr_events, 0);
+ else
+ inflight = 0;
+ mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
timeout = jiffies + ctx->sq_thread_idle;
}
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
/* make sure to read SQ tail after writing flags */
smp_mb();
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- i = 0;
- all_fixed = true;
- do {
- if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
- all_fixed = false;
-
- i++;
- if (i == ARRAY_SIZE(sqes))
- break;
- } while (io_get_sqring(ctx, &sqes[i]));
-
/* Unless all new commands are FIXED regions, grab mm */
- if (!all_fixed && !cur_mm) {
+ if (!cur_mm) {
mm_fault = !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
}
}
- inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
- mm_fault);
+ to_submit = min(to_submit, ctx->sq_entries);
+ inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
+ mm_fault);
/* Commit SQ ring head once we've consumed all SQEs */
io_commit_sqring(ctx);
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
- bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
}
for (i = 0; i < to_submit; i++) {
- bool force_nonblock = true;
struct sqe_submit s;
if (!io_get_sqring(ctx, &s))
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- force_nonblock);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
s.needs_lock = false;
s.needs_fixed_file = false;
submit++;
-
- /*
- * The caller will block for events after submit, submit the
- * last IO non-blocking. This is either the only IO it's
- * submitting, or it already submitted the previous ones. This
- * improves performance by avoiding an async punt that we don't
- * need to do.
- */
- if (block_for_last && submit == to_submit)
- force_nonblock = false;
-
- io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+ io_submit_sqe(ctx, &s, statep, &link);
}
- io_commit_sqring(ctx);
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- !block_for_last);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(statep);
+ io_commit_sqring(ctx);
+
return submit;
}
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
- bool block_for_last = false;
-
to_submit = min(to_submit, ctx->sq_entries);
- /*
- * Allow last submission to block in a series, IFF the caller
- * asked to wait for events and we don't currently have
- * enough. This potentially avoids an async punt.
- */
- if (to_submit == min_complete &&
- io_cqring_events(ctx->rings) < min_complete)
- block_for_last = true;
-
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit, block_for_last);
+ submitted = io_ring_submit(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
if (ret)
goto err;
- ret = io_uring_get_fd(ctx);
- if (ret < 0)
- goto err;
-
memset(&p->sq_off, 0, sizeof(p->sq_off));
p->sq_off.head = offsetof(struct io_rings, sq.head);
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p->cq_off.cqes = offsetof(struct io_rings, cqes);
+ /*
+ * Install ring fd as the very last thing, so we don't risk someone
+ * having closed it before we finish setup
+ */
+ ret = io_uring_get_fd(ctx);
+ if (ret < 0)
+ goto err;
+
p->features = IORING_FEAT_SINGLE_MMAP;
return ret;
err:
return false;
}
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (nfs4_is_valid_delegation(delegation, 0))
+ return delegation;
+ return NULL;
+}
+
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
if (delegation != NULL &&
nfs4_stateid_match_other(dst, &delegation->stateid)) {
dst->seqid = delegation->stateid.seqid;
- return ret;
+ ret = true;
}
rcu_read_unlock();
out:
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t flags);
int nfs4_check_delegation(struct inode *inode, fmode_t flags);
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
- if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
- return 0;
switch (claim) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_FH:
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
- struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags;
fmode_t fmode = opendata->o_arg.fmode;
}
spin_unlock(&state->owner->so_lock);
rcu_read_lock();
- delegation = rcu_dereference(nfsi->delegation);
+ delegation = nfs4_get_valid_delegation(state->inode);
if (!can_open_delegated(delegation, fmode, claim)) {
rcu_read_unlock();
break;
data->o_arg.open_flags, claim))
goto out_no_action;
rcu_read_lock();
- delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+ delegation = nfs4_get_valid_delegation(data->state->inode);
if (can_open_delegated(delegation, data->o_arg.fmode, claim))
goto unlock_no_action;
rcu_read_unlock();
struct acpi_processor_limit limit;
struct thermal_cooling_device *cdev;
struct device *dev; /* Processor device. */
- struct dev_pm_qos_request perflib_req;
- struct dev_pm_qos_request thermal_req;
+ struct freq_qos_request perflib_req;
+ struct freq_qos_request thermal_req;
};
struct acpi_processor_errata {
#ifdef CONFIG_CPU_FREQ
extern bool acpi_processor_cpufreq_init;
void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
{
return;
}
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
return;
}
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
#if defined(CONFIG_ACPI_CPU_FREQ_PSS) && defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
struct bpf_prog_stats {
u64 cnt;
u64 nsecs;
}
#endif
-#if defined(CONFIG_XDP_SOCKETS)
-struct xdp_sock;
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
-#else
-struct xdp_sock;
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- return NULL;
-}
-
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(struct bpf_map *map)
-{
-}
-#endif
-
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct dev_pm_qos_request *min_freq_req;
- struct dev_pm_qos_request *max_freq_req;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
#define dynamic_dev_dbg(dev, fmt, ...) \
do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
#endif
#endif
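
A usage sketch (an assumption, not part of the patch): because the stub above compiles to nothing when dynamic debug is disabled, a caller can leave a hex dump in place unconditionally:

static void debug_dump_buf(const void *buf, size_t len)
{
	/* emitted only when this callsite is enabled via dynamic debug */
	dynamic_hex_dump("buf: ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
}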
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map);
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
+static inline
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
- unsigned long *addr);
+ unsigned long *addr)
+{
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL. Skip the first 8
+ * bytes so we start at a nice even number.
+ */
+ return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+}
efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
- unsigned long alignment);
+ unsigned long alignment,
+ unsigned long min_addr);
efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image,
#define THIS_MODULE ((struct module *)0)
#endif
-#define NS_SEPARATOR "."
-
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
 * generate checksums for some symbols */
* absolute relocations that require runtime processing on relocatable
* kernels.
*/
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
" .balign 4 \n" \
- "__ksymtab_" #ns NS_SEPARATOR #sym ": \n" \
+ "__ksymtab_" #sym ": \n" \
" .long " #sym "- . \n" \
" .long __kstrtab_" #sym "- . \n" \
" .long __kstrtabns_" #sym "- . \n" \
int namespace_offset;
};
#else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
- static const struct kernel_symbol __ksymtab_##sym##__##ns \
- asm("__ksymtab_" #ns NS_SEPARATOR #sym) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
- asm("__ksymtab_" #sym) \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, NULL }
static const char __kstrtabns_##sym[] \
__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #ns; \
- __KSYMTAB_ENTRY_NS(sym, sec, ns)
+ __KSYMTAB_ENTRY_NS(sym, sec)
#define ___EXPORT_SYMBOL(sym, sec) \
___export_symbol_common(sym, sec); \
#endif /* CONFIG_BPF_JIT */
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
#define BPF_ANC BIT(15)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context. When the direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation. The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * A %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+ return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+ __GFP_DIRECT_RECLAIM;
+}
+
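
As a quick illustration (not part of the patch), only a plain sleepable allocation mask such as GFP_KERNEL satisfies the new predicate:

/* Sketch: expected results for common gfp masks (illustrative only). */
static void gfpflags_normal_context_demo(void)
{
	WARN_ON(!gfpflags_normal_context(GFP_KERNEL));	/* sleepable, owns %current */
	WARN_ON(gfpflags_normal_context(GFP_ATOMIC));	/* cannot sleep */
	/* may sleep, but runs nested in a reclaim context: */
	WARN_ON(gfpflags_normal_context(GFP_KERNEL | __GFP_MEMALLOC));
}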
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
netdev_features_t set_features;
enum macvlan_mode mode;
u16 flags;
- int nest_level;
unsigned int macaddr_count;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
+ struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int nest_level;
};
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
extern bool vlan_uses_dev(const struct net_device *dev);
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG_ON(!is_vlan_dev(dev));
- return vlan_dev_priv(dev)->nest_level;
-}
#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
{
return false;
}
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG();
- return 0;
-}
#endif
/**
};
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
- struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
- u8 reserved_at_0[0x40];
};
struct mlx5_ifc_fte_match_param_bits {
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);
- int (*ndo_get_lock_subclass)(struct net_device *dev);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
+ * @upper_level: Maximum depth level of upper devices.
+ * @lower_level: Maximum depth level of lower devices.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
- *
- * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+ *	spinlock
+ * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ * @qdisc_xmit_lock_key: lockdep class annotating
+ * netdev_queue->_xmit_lock spinlock
+ * @addr_list_lock_key: lockdep class annotating
+ * net_device->addr_list_lock spinlock
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
+ unsigned char upper_level;
+ unsigned char lower_level;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
#endif
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
- struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
+ struct lock_class_key qdisc_tx_busylock_key;
+ struct lock_class_key qdisc_running_key;
+ struct lock_class_key qdisc_xmit_lock_key;
+ struct lock_class_key addr_list_lock_key;
bool proto_down;
unsigned wol_enabled:1;
};
f(dev, &dev->_tx[i], arg);
}
-#define netdev_lockdep_set_classes(dev) \
-{ \
- static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
- static struct lock_class_key qdisc_xmit_lock_key; \
- static struct lock_class_key dev_addr_list_lock_key; \
- unsigned int i; \
- \
- (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
- lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
- for (i = 0; i < (dev)->num_tx_queues; i++) \
- lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
- &qdisc_xmit_lock_key); \
-}
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
}
void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
spin_lock(&dev->addr_list_lock);
}
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
- int subclass = SINGLE_DEPTH_NESTING;
-
- if (dev->netdev_ops->ndo_get_lock_subclass)
- subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
- spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
* -EBUSY -- @event is for this PMU but PMU temporarily unavailable
* -EINVAL -- @event is for this PMU but @event is not valid
* -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
- * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges
+ * -EACCES -- @event is for this PMU, @event is valid, but no privileges
*
* 0 -- @event is for this PMU and valid
*
/* End of v2 array */
s32 zcanfd_2_mcu_addr;
s32 zqspi_2_mcu_addr;
+ s32 mcu_2_ecspi_addr;
/* End of v3 array */
+ s32 mcu_2_zqspi_addr;
+ /* End of v4 array */
};
/**
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE,
- DEV_PM_QOS_MIN_FREQUENCY,
- DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS,
};
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
- struct pm_qos_constraints min_frequency;
- struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req;
- struct dev_pm_qos_request *min_frequency_req;
- struct dev_pm_qos_request *max_frequency_req;
};
/* Action requested to pm_qos_update_target */
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- case DEV_PM_QOS_MIN_FREQUENCY:
- return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- case DEV_PM_QOS_MAX_FREQUENCY:
- return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default:
WARN_ON(1);
return 0;
}
#endif
+#define FREQ_QOS_MIN_DEFAULT_VALUE 0
+#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+ return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+
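
A usage sketch for the new API (illustrative names, assuming the cpufreq_policy::constraints field added elsewhere in this series): a thermal driver capping a policy's maximum frequency would keep one request object and either add or update it:

static struct freq_qos_request thermal_cap;	/* hypothetical */

static int cap_cpu_max_freq(struct cpufreq_policy *policy, s32 max_khz)
{
	if (freq_qos_request_active(&thermal_cap))
		return freq_qos_update_request(&thermal_cap, max_khz);

	return freq_qos_add_request(&policy->constraints, &thermal_cap,
				    FREQ_QOS_MAX, max_khz);
}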
#endif
LOCKDOWN_NONE,
LOCKDOWN_MODULE_SIGNATURE,
LOCKDOWN_DEV_MEM,
+ LOCKDOWN_EFI_TEST,
LOCKDOWN_KEXEC,
LOCKDOWN_HIBERNATION,
LOCKDOWN_PCI_ACCESS,
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
}
/**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+ return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
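
A sketch of the intended call pattern (an assumption, not from the patch): a poll handler can now test the receive queue without taking its lock, the READ_ONCE() above pairing with the WRITE_ONCE() stores added to the queue primitives further below:

static __poll_t sock_poll_sketch(struct sock *sk)
{
	__poll_t mask = 0;

	/* no sk_receive_queue lock needed for the emptiness check */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}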
+
+/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
+ /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ WRITE_ONCE(newsk->next, next);
+ WRITE_ONCE(newsk->prev, prev);
+ WRITE_ONCE(next->prev, newsk);
+ WRITE_ONCE(prev->next, newsk);
list->qlen++;
}
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
- first->prev = prev;
- prev->next = first;
+ WRITE_ONCE(first->prev, prev);
+ WRITE_ONCE(prev->next, first);
- last->next = next;
- next->prev = last;
+ WRITE_ONCE(last->next, next);
+ WRITE_ONCE(next->prev, last);
}
/**
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
- next->prev = prev;
- prev->next = next;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
}
/**
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
-#define SOMAXCONN 128
+#define SOMAXCONN 4096
/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
return 0;
}
+static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
+ unsigned int max_reqs)
+{
+}
+
static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
{
return false;
.size = _size, \
}
-#define __BIN_ATTR_WO(_name) { \
+#define __BIN_ATTR_WO(_name, _size) { \
.attr = { .name = __stringify(_name), .mode = 0200 }, \
- .store = _name##_store, \
+ .write = _name##_write, \
.size = _size, \
}
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count);
+extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
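
A caller sketch for the new write probe (hypothetical helper, mirroring the kernel-doc above): the store either fully succeeds or returns -EFAULT, without an unhandled fault:

static long poke_user_u32(u32 __user *uaddr, u32 val)
{
	/* 0 on success, -EFAULT if the user page cannot be written */
	return probe_user_write(uaddr, &val, sizeof(val));
}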
struct virtio_vsock_pkt {
struct virtio_vsock_hdr hdr;
- struct work_struct work;
struct list_head list;
/* socket refcnt not held, only use for cancellation */
struct vsock_sock *vsk;
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
- u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
+ struct lock_class_key stats_lock_key;
};
#define bond_slave_get_rcu(dev) \
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- sk->sk_napi_id = skb->napi_id;
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!sk->sk_napi_id)
- sk->sk_napi_id = skb->napi_id;
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}
#include <linux/types.h>
#include <linux/in6.h>
+#include <linux/siphash.h>
#include <uapi/linux/if_ether.h>
struct sk_buff;
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
- struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_vlan cvlan;
struct list_head backlogs;
spinlock_t lock;
u32 flows_cnt;
- u32 perturbation;
+ siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+ u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
return reciprocal_scale(hash, fq->flows_cnt);
}
INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- fq->perturbation = prandom_u32();
+ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
#else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+ unsigned int buf_num)
{ return 0; }
#endif /* CONFIG_HWBM */
#endif /* _HWBM_H */
}
struct ip_frag_state {
- struct iphdr *iph;
+ bool DF;
unsigned int hlen;
unsigned int ll_rs;
unsigned int mtu;
};
void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
- unsigned int mtu, struct ip_frag_state *state);
+ unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
struct ip_frag_state *state);
struct delayed_work defense_work; /* Work handler */
int drop_rate;
int drop_counter;
+ int old_secure_tcp;
atomic_t dropentry;
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
#define __net_initconst __initconst
#endif
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(struct net *net, struct net *peer);
bool peernet_has_id(struct net *net, struct net *peer);
struct net *get_net_ns_by_id(struct net *net, int id);
{
int cpu = raw_smp_processor_id();
- if (unlikely(sk->sk_incoming_cpu != cpu))
- sk->sk_incoming_cpu = cpu;
+ if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+ WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
static inline void sock_rps_record_flow_hash(__u32 hash)
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_allow_blocking(sk->sk_allocation))
+ if (gfpflags_normal_context(sk->sk_allocation))
		return &current->task_frag;
return &sk->sk_frag;
u8 offloaded:1;
__be32 remote_vni;
u32 remote_ifindex;
+ struct net_device *remote_dev;
struct list_head list;
struct rcu_head rcu;
struct dst_cache dst_cache;
/* Nodes are linked in the struct xdp_sock map_list field, and used to
* track which maps a certain socket reside in.
*/
-struct xsk_map;
+
+struct xsk_map {
+ struct bpf_map map;
+ struct list_head __percpu *flush_list;
+ spinlock_t lock; /* Synchronize map updates */
+ struct xdp_sock *xsk_map[];
+};
+
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs);
+void __xsk_map_flush(struct bpf_map *map);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct xdp_sock *xs;
+
+ if (key >= map->max_entries)
+ return NULL;
+
+ xs = READ_ONCE(m->xsk_map[key]);
+ return xs;
+}
static inline u64 xsk_umem_extract_addr(u64 addr)
{
return -ENOTSUPP;
}
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
- return -ENOTSUPP;
-}
-
-static inline void xsk_flush(struct xdp_sock *xs)
-{
-}
-
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
return 0;
}
+static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(struct bpf_map *map)
+{
+}
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
struct ib_cq_init_attr {
unsigned int cqe;
- int comp_vector;
+ u32 comp_vector;
u32 flags;
};
struct link_info *li);
#ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
- char *name,
- struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+ char *name,
+ struct asoc_simple_dai *dai)
{
struct device *dev = simple_priv_to_dev(priv);
dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
}
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
{
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device *dev = simple_priv_to_dev(priv);
TRACE_EVENT(br_fdb_update,
TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user),
+ const unsigned char *addr, u16 vid, unsigned long flags),
- TP_ARGS(br, source, addr, vid, added_by_user),
+ TP_ARGS(br, source, addr, vid, flags),
TP_STRUCT__entry(
__string(br_dev, br->dev->name)
__string(dev, source->dev->name)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, vid)
- __field(bool, added_by_user)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__assign_str(dev, source->dev->name);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
- __entry->added_by_user = added_by_user;
+ __entry->flags = flags;
),
- TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+ TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx",
__get_str(br_dev), __get_str(dev), __entry->addr[0],
__entry->addr[1], __entry->addr[2], __entry->addr[3],
__entry->addr[4], __entry->addr[5], __entry->vid,
- __entry->added_by_user)
+ __entry->flags)
);
__entry->qgid = qgroup->qgroupid;
__entry->cur_reserved = qgroup->rsv.values[type];
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
TP_fast_assign_btrfs(root->fs_info,
__entry->refroot = root->root_key.objectid;
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
TP_STRUCT__entry_btrfs(
__field( u64, refroot )
__field( s64, diff )
- __field( int, type )
),
TP_fast_assign_btrfs(root->fs_info,
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
};
enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
__MAX_BPF_ATTACH_TYPE
};
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
- *
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
* restricted to raw_tracepoint bpf programs.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using the **bpf_probe_read_user()** helper here
+ * instead to read the string would require estimating the length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another use case is parsing individual process arguments or
+ * individual environment variables by navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
*/
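
A sketch of the resulting helper split in a tracing program (the ctx->di field is carried over from the documentation example above, so this is illustrative rather than normative):

SEC("kprobe/sys_open")
void bpf_sys_open_split(struct pt_regs *ctx)
{
	char buf[256];

	/* the filename pointer in ctx->di is a user-space address */
	bpf_probe_read_user_str(buf, sizeof(buf), (void *)ctx->di);

	/* kernel pointers would use bpf_probe_read_kernel{,_str}() instead */
}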
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
FN(sk_storage_delete), \
FN(send_signal), \
FN(tcp_gen_syncookie), \
- FN(skb_output),
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
*
* Protocol changelog:
*
+ * 7.1:
+ * - add the following messages:
+ * FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ * FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ * FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ * FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ * FUSE_RELEASEDIR
+ * - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ * - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ * - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ * - add FUSE_ACCESS message
+ * - add FUSE_CREATE message
+ * - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ * - add frsize to fuse_kstatfs
+ * - clean up request size limit checking
+ *
+ * 7.5:
+ * - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ * - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ * - add FUSE_INTERRUPT message
+ * - add POSIX file lock support
+ *
+ * 7.8:
+ * - add lock_owner and flags fields to fuse_release_in
+ * - add FUSE_BMAP message
+ * - add FUSE_DESTROY message
+ *
* 7.9:
* - new fuse_getattr_in input argument of GETATTR
* - add lk_flags in fuse_lk_in
return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
int i;
{
struct latch_tree_node *n;
- if (!bpf_jit_kallsyms_enabled())
- return NULL;
-
n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
return n ?
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-u64 __weak bpf_probe_read(void * dst, u32 size, const void * unsafe_ptr)
+u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
memset(dst, 0, size);
return -EFAULT;
}
+
/**
* __bpf_prog_run - run eBPF program on a given context
* @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
LDST(W, u32)
LDST(DW, u64)
#undef LDST
-#define LDX_PROBE(SIZEOP, SIZE) \
- LDX_PROBE_MEM_##SIZEOP: \
- bpf_probe_read(&DST, SIZE, (const void *)(long) SRC); \
+#define LDX_PROBE(SIZEOP, SIZE) \
+ LDX_PROBE_MEM_##SIZEOP: \
+ bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) SRC); \
CONT;
LDX_PROBE(B, 1)
LDX_PROBE(H, 2)
if (!dtab->n_buckets) /* Overflow check */
return -EINVAL;
- cost += sizeof(struct hlist_head) * dtab->n_buckets;
+ cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
}
/* if map size is larger than memlock limit, reject it */
.map_check_btf = map_check_no_btf,
};
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+ struct net_device *netdev)
+{
+ unsigned long flags;
+ u32 i;
+
+ spin_lock_irqsave(&dtab->index_lock, flags);
+ for (i = 0; i < dtab->n_buckets; i++) {
+ struct bpf_dtab_netdev *dev;
+ struct hlist_head *head;
+ struct hlist_node *next;
+
+ head = dev_map_index_hash(dtab, i);
+
+ hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+ if (netdev != dev->dev)
+ continue;
+
+ dtab->items--;
+ hlist_del_rcu(&dev->index_hlist);
+ call_rcu(&dev->rcu, __dev_map_entry_free);
+ }
+ }
+ spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
static int dev_map_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
*/
rcu_read_lock();
list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+ if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ dev_map_hash_remove_netdev(dtab, netdev);
+ continue;
+ }
+
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev, *odev;
{
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
+ kvfree(aux->func_info);
free_used_maps(aux);
bpf_prog_uncharge_memlock(aux->prog);
security_bpf_prog_free(aux);
bpf_prog_free(aux->prog);
}
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+ bpf_prog_kallsyms_del_all(prog);
+ btf_put(prog->aux->btf);
+ bpf_prog_free_linfo(prog);
+
+ if (deferred)
+ call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+ else
+ __bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
- bpf_prog_kallsyms_del_all(prog);
- btf_put(prog->aux->btf);
- kvfree(prog->aux->func_info);
- bpf_prog_free_linfo(prog);
-
- call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+ __bpf_prog_put_noref(prog, true);
}
}
u32 btf_id)
{
switch (prog_type) {
- case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_TRACING:
if (btf_id > BTF_MAX_TYPE)
return -EINVAL;
break;
return err;
free_used_maps:
- bpf_prog_free_linfo(prog);
- kvfree(prog->aux->func_info);
- btf_put(prog->aux->btf);
- bpf_prog_kallsyms_del_subprogs(prog);
- free_used_maps(prog->aux);
+ /* In case we have subprogs, we need to wait for a grace
+ * period before we can tear down JIT memory since symbols
+ * are already exposed under kallsyms.
+ */
+ __bpf_prog_put_noref(prog, prog->aux->func_cnt);
+ return err;
free_prog:
bpf_prog_uncharge_memlock(prog);
free_prog_sec:
return PTR_ERR(prog);
if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
+ prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
err = -EINVAL;
goto out_put_prog;
}
- if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT &&
- prog->aux->attach_btf_id) {
+ if (prog->type == BPF_PROG_TYPE_TRACING) {
if (attr->raw_tracepoint.name) {
/* raw_tp name should not be specified in raw_tp
* programs that were verified via in-kernel BTF info
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
break;
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ if (!env->prog->aux->attach_btf_id)
+ return 0;
+ range = tnum_const(0);
+ break;
default:
return 0;
}
{
struct bpf_prog *prog = env->prog;
u32 btf_id = prog->aux->attach_btf_id;
+ const char prefix[] = "btf_trace_";
const struct btf_type *t;
const char *tname;
- if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT && btf_id) {
- const char prefix[] = "btf_trace_";
+ if (prog->type != BPF_PROG_TYPE_TRACING)
+ return 0;
- t = btf_type_by_id(btf_vmlinux, btf_id);
- if (!t) {
- verbose(env, "attach_btf_id %u is invalid\n", btf_id);
- return -EINVAL;
- }
+ if (!btf_id) {
+ verbose(env, "Tracing programs must provide btf_id\n");
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf_vmlinux, btf_id);
+ if (!t) {
+ verbose(env, "attach_btf_id %u is invalid\n", btf_id);
+ return -EINVAL;
+ }
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ if (!tname) {
+ verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
+ return -EINVAL;
+ }
+
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_RAW_TP:
if (!btf_type_is_typedef(t)) {
verbose(env, "attach_btf_id %u is not a typedef\n",
btf_id);
return -EINVAL;
}
- tname = btf_name_by_offset(btf_vmlinux, t->name_off);
- if (!tname || strncmp(prefix, tname, sizeof(prefix) - 1)) {
+ if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
verbose(env, "attach_btf_id %u points to wrong type name %s\n",
btf_id, tname);
return -EINVAL;
prog->aux->attach_func_name = tname;
prog->aux->attach_func_proto = t;
prog->aux->attach_btf_trace = true;
+ return 0;
+ default:
+ return -EINVAL;
}
- return 0;
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
#include <linux/slab.h>
#include <linux/sched.h>
-struct xsk_map {
- struct bpf_map map;
- struct xdp_sock **xsk_map;
- struct list_head __percpu *flush_list;
- spinlock_t lock; /* Synchronize map updates */
-};
-
int xsk_map_inc(struct xsk_map *map)
{
struct bpf_map *m = &map->map;
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
+ struct bpf_map_memory mem;
+ int cpu, err, numa_node;
struct xsk_map *m;
- int cpu, err;
- u64 cost;
+ u64 cost, size;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
return ERR_PTR(-EINVAL);
- m = kzalloc(sizeof(*m), GFP_USER);
- if (!m)
+ numa_node = bpf_map_attr_numa_node(attr);
+ size = struct_size(m, xsk_map, attr->max_entries);
+ cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
+
+ err = bpf_map_charge_init(&mem, cost);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ m = bpf_map_area_alloc(size, numa_node);
+ if (!m) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
bpf_map_init_from_attr(&m->map, attr);
+ bpf_map_charge_move(&m->map.memory, &mem);
spin_lock_init(&m->lock);
- cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
- cost += sizeof(struct list_head) * num_possible_cpus();
-
- /* Notice returns -EPERM on if map size is larger than memlock limit */
- err = bpf_map_charge_init(&m->map.memory, cost);
- if (err)
- goto free_m;
-
- err = -ENOMEM;
-
m->flush_list = alloc_percpu(struct list_head);
- if (!m->flush_list)
- goto free_charge;
+ if (!m->flush_list) {
+ bpf_map_charge_finish(&m->map.memory);
+ bpf_map_area_free(m);
+ return ERR_PTR(-ENOMEM);
+ }
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
- m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
- sizeof(struct xdp_sock *),
- m->map.numa_node);
- if (!m->xsk_map)
- goto free_percpu;
return &m->map;
-
-free_percpu:
- free_percpu(m->flush_list);
-free_charge:
- bpf_map_charge_finish(&m->map.memory);
-free_m:
- kfree(m);
- return ERR_PTR(err);
}
static void xsk_map_free(struct bpf_map *map)
bpf_clear_redirect_map(map);
synchronize_net();
free_percpu(m->flush_list);
- bpf_map_area_free(m->xsk_map);
- kfree(m);
+ bpf_map_area_free(m);
}
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
-{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *xs;
-
- if (key >= map->max_entries)
- return NULL;
-
- xs = READ_ONCE(m->xsk_map[key]);
- return xs;
-}
-
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct list_head *flush_list = this_cpu_ptr(m->flush_list);
- int err;
-
- err = xsk_rcv(xs, xdp);
- if (err)
- return err;
-
- if (!xs->flush_node.prev)
- list_add(&xs->flush_node, flush_list);
-
- return 0;
-}
-
-void __xsk_map_flush(struct bpf_map *map)
+static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct list_head *flush_list = this_cpu_ptr(m->flush_list);
- struct xdp_sock *xs, *tmp;
-
- list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
- xsk_flush(xs);
- __list_del_clearprev(&xs->flush_node);
- }
+ const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
+ struct bpf_insn *insn = insn_buf;
+
+ *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+ *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
+ *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ *insn++ = BPF_MOV64_IMM(ret, 0);
+ return insn - insn_buf;
}
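
For reference, the sequence emitted above is the inline equivalent of the C lookup below (a sketch mirroring __xsk_map_lookup_elem()); the verifier patches it in place of a bpf_map_lookup_elem() helper call:

/* C equivalent of the generated instructions, for illustration only. */
static struct xdp_sock *xsk_map_inline_lookup_equiv(struct xsk_map *m, u32 index)
{
	if (index >= m->map.max_entries)
		return NULL;
	return m->xsk_map[index];
}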
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
.map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key,
.map_lookup_elem = xsk_map_lookup_elem,
+ .map_gen_lookup = xsk_map_gen_lookup,
.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
continue;
- if (is_sched_load_balance(cp))
+ if (is_sched_load_balance(cp) &&
+ !cpumask_empty(cp->effective_cpus))
csa[csn++] = cp;
/* skip @cp's subtree if not a partition root */
perf_pmu_output_stop(event);
/* now it's safe to free the pages */
- atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
- atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+ if (!rb->aux_mmap_locked)
+ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+ else
+ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
/* this has to be the last one */
rb_free_aux(rb);
static int __perf_pmu_output_stop(void *info)
{
struct perf_event *event = info;
- struct pmu *pmu = event->pmu;
+ struct pmu *pmu = event->ctx->pmu;
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct remote_output ro = {
.rb = event->rb,
attr->size = size;
- if (attr->__reserved_1)
+ if (attr->__reserved_1 || attr->__reserved_2)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
find $cpio_dir -type f -print0 |
xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
-# Create archive and try to normalize metadata for reproducibility
-tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
- -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --owner=0 --group=0 --numeric-owner --no-recursion \
+ -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
echo "$src_files_md5" > kernel/kheaders.md5
echo "$obj_files_md5" >> kernel/kheaders.md5
}
late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+ struct pm_qos_constraints *c;
+
+ c = &qos->min_freq;
+ plist_head_init(&c->list);
+ c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->type = PM_QOS_MAX;
+ c->notifiers = &qos->min_freq_notifiers;
+ BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+ c = &qos->max_freq;
+ plist_head_init(&c->list);
+ c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = &qos->max_freq_notifiers;
+ BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type)
+{
+ s32 ret;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = IS_ERR_OR_NULL(qos) ?
+ FREQ_QOS_MIN_DEFAULT_VALUE :
+ pm_qos_read_value(&qos->min_freq);
+ break;
+ case FREQ_QOS_MAX:
+ ret = IS_ERR_OR_NULL(qos) ?
+ FREQ_QOS_MAX_DEFAULT_VALUE :
+ pm_qos_read_value(&qos->max_freq);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ */
+static int freq_qos_apply(struct freq_qos_request *req,
+ enum pm_qos_req_action action, s32 value)
+{
+ int ret;
+
+	switch (req->type) {
+ case FREQ_QOS_MIN:
+ ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+ action, value);
+ break;
+ case FREQ_QOS_MAX:
+ ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object. The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !req)
+ return -EINVAL;
+
+ if (WARN(freq_qos_request_active(req),
+ "%s() called for active request\n", __func__))
+ return -EINVAL;
+
+ req->qos = qos;
+ req->type = type;
+ ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+ if (ret < 0) {
+ req->qos = NULL;
+ req->type = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+ if (!req)
+ return -EINVAL;
+
+ if (WARN(!freq_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (req->pnode.prio == new_value)
+ return 0;
+
+ return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+ if (!req)
+ return -EINVAL;
+
+ if (WARN(!freq_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !notifier)
+ return -EINVAL;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+ notifier);
+ break;
+ case FREQ_QOS_MAX:
+ ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !notifier)
+ return -EINVAL;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+ notifier);
+ break;
+ case FREQ_QOS_MAX:
+ ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
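
Taken together, these helpers form a small frequency QoS API: add a request,
update it, remove it, and get notified when the aggregate min/max constraint
changes. A minimal usage sketch follows (hypothetical caller; ``policy->constraints``
stands in for any reachable struct freq_constraints, and values are in kHz)::

    struct freq_qos_request req;    /* must stay allocated while active */
    int ret;

    /* add a 800000 kHz minimum-frequency constraint */
    ret = freq_qos_add_request(&policy->constraints, &req,
                               FREQ_QOS_MIN, 800000);
    if (ret < 0)
        return ret;

    /* later: raise the floor, then drop the request entirely */
    freq_qos_update_request(&req, 1000000);
    freq_qos_remove_request(&req);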
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
- enum s_alloc alloc_state;
+ enum s_alloc alloc_state = sa_none;
struct sched_domain *sd;
struct s_data d;
struct rq *rq = NULL;
struct sched_domain_topology_level *tl_asym;
bool has_asym = false;
+ if (WARN_ON(cpumask_empty(cpu_map)))
+ goto error;
+
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
rcu_read_unlock();
if (has_asym)
- static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+ static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
if (rq && sched_debug_enabled) {
pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
*/
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
+ unsigned int cpu = cpumask_any(cpu_map);
int i;
+ if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
+ static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
+
rcu_read_lock();
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*/
+#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
/* Reset ack counter. */
atomic_set(&msdata->thread_ack, msdata->num_threads);
smp_wmb();
- msdata->state = newstate;
+ WRITE_ONCE(msdata->state, newstate);
}
/* Last one to ack a state moves to the next state. */
static int multi_cpu_stop(void *data)
{
struct multi_stop_data *msdata = data;
- enum multi_stop_state curstate = MULTI_STOP_NONE;
+ enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
int cpu = smp_processor_id(), err = 0;
const struct cpumask *cpumask;
unsigned long flags;
do {
/* Chill out and ensure we re-read multi_stop_state. */
stop_machine_yield(cpumask);
- if (msdata->state != curstate) {
- curstate = msdata->state;
+ newstate = READ_ONCE(msdata->state);
+ if (newstate != curstate) {
+ curstate = newstate;
switch (curstate) {
case MULTI_STOP_DISABLE_IRQ:
local_irq_disable();
struct hrtimer_clock_base *base;
for (;;) {
- base = timer->base;
+ base = READ_ONCE(timer->base);
if (likely(base != &migration_base)) {
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* See the comment in lock_hrtimer_base() */
- timer->base = &migration_base;
+ WRITE_ONCE(timer->base, &migration_base);
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
- timer->base = base;
+ WRITE_ONCE(timer->base, base);
goto again;
}
- timer->base = new_base;
+ WRITE_ONCE(timer->base, new_base);
} else {
if (new_cpu_base != this_cpu_base &&
hrtimer_check_target(timer, new_base)) {
/**
* thread_group_sample_cputime - Sample cputime for a given task
* @tsk: Task for which cputime needs to be started
- * @iimes: Storage for time samples
+ * @samples: Storage for time samples
*
* Called from sys_getitimer() to calculate the expiry time of an active
* timer. That means group cputime accounting is already active. Called
* member of @pct->bases[CLK].nextevt. False otherwise
*/
static inline bool
-task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
+task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
int i;
for (i = 0; i < CPUCLOCK_MAX; i++) {
- if (sample[i] >= pct->bases[i].nextevt)
+ if (samples[i] >= pct->bases[i].nextevt)
return true;
}
return false;
#include <linux/seqlock.h>
#include <linux/bitops.h>
+#include "timekeeping.h"
+
/**
* struct clock_read_data - data required to read from sched_clock()
*
};
#endif
-BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
+BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
{
- int ret;
+ int ret = probe_user_read(dst, unsafe_ptr, size);
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_proto = {
+ .func = bpf_probe_read_user,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
- ret = probe_kernel_read(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ .func = bpf_probe_read_user_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
+ probe_kernel_read_strict(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
out:
memset(dst, 0, size);
+ return ret;
+}
+BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ .func = bpf_probe_read_kernel,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+ .func = bpf_probe_read_compat,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ /*
+ * The strncpy_from_unsafe_*() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance as we're probing
+ * arbitrary memory anyway similar to bpf_probe_read_*() and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in the error case, so that improper users who ignore the
+ * return code altogether don't copy garbage; otherwise, the length
+ * of the string is returned and can be used for
+ * bpf_perf_event_output() et al.
+ */
+ ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
+ strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+out:
+ memset(dst, 0, size);
return ret;
}
-static const struct bpf_func_proto bpf_probe_read_proto = {
- .func = bpf_probe_read,
+BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+ .func = bpf_probe_read_kernel_str,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
+ .func = bpf_probe_read_compat_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
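
With this split, BPF programs must state which address space they probe:
bpf_probe_read_user*() for user pointers, bpf_probe_read_kernel*() for kernel
pointers, with plain bpf_probe_read() kept as the compat fallback. A minimal
sketch of a tracing program using the new helpers (hypothetical attach point;
assumes a libbpf-style build)::

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";    /* these helpers are gpl_only */

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
        /* the second argument of do_sys_open() is a user-space filename */
        const char *ufname = (const char *)PT_REGS_PARM2(ctx);
        char fname[64];
        long n;

        /* user pointer, so use the _user variant; on success the length
         * of the string including the trailing NUL is returned
         */
        n = bpf_probe_read_user_str(fname, sizeof(fname), ufname);
        if (n < 0)
            return 0;    /* fault: fname[] was zeroed by the helper */
        return 0;
    }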
+
+BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
u32, size)
{
/*
return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
- if (!access_ok(unsafe_ptr, size))
- return -EPERM;
- return probe_kernel_write(unsafe_ptr, src, size);
+ return probe_user_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
- const void *, unsafe_ptr)
-{
- int ret;
-
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
-
- /*
- * The strncpy_from_unsafe() call will likely not fill the entire
- * buffer, but that's okay in this circumstance as we're probing
- * arbitrary memory anyway similar to bpf_probe_read() and might
- * as well probe the stack. Thus, memory is explicitly cleared
- * only in error case, so that improper users ignoring return
- * code altogether don't copy garbage; otherwise length of string
- * is returned that can be used for bpf_perf_event_output() et al.
- */
- ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
- if (unlikely(ret < 0))
-out:
- memset(dst, 0, size);
-
- return ret;
-}
-
-static const struct bpf_func_proto bpf_probe_read_str_proto = {
- .func = bpf_probe_read_str,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_ANYTHING,
-};
-
struct send_signal_irq_work {
struct irq_work irq_work;
struct task_struct *task;
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
- case BPF_FUNC_probe_read:
- return &bpf_probe_read_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+ return &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read:
+ return &bpf_probe_read_compat_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+ return &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_str_proto;
+ return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
switch (func_id) {
case BPF_FUNC_perf_event_output:
return &bpf_perf_event_output_proto_raw_tp;
-#ifdef CONFIG_NET
- case BPF_FUNC_skb_output:
- return &bpf_skb_output_proto;
-#endif
case BPF_FUNC_get_stackid:
return &bpf_get_stackid_proto_raw_tp;
case BPF_FUNC_get_stack:
}
}
+static const struct bpf_func_proto *
+tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+#ifdef CONFIG_NET
+ case BPF_FUNC_skb_output:
+ return &bpf_skb_output_proto;
+#endif
+ default:
+ return raw_tp_prog_func_proto(func_id, prog);
+ }
+}
+
static bool raw_tp_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- /* largest tracepoint in the kernel has 12 args */
- if (off < 0 || off >= sizeof(__u64) * 12)
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+static bool tracing_prog_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
- if (!prog->aux->attach_btf_id)
- return true;
return btf_ctx_access(off, size, type, prog, info);
}
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
+const struct bpf_verifier_ops tracing_verifier_ops = {
+ .get_func_proto = tracing_prog_func_proto,
+ .is_valid_access = tracing_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracing_prog_ops = {
+};
+
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
goto out;
}
+ mutex_lock(&event_mutex);
ret = perf_trace_event_init(tp_event, p_event);
if (ret)
destroy_local_trace_kprobe(tp_event);
+ mutex_unlock(&event_mutex);
out:
kfree(func);
return ret;
void perf_kprobe_destroy(struct perf_event *p_event)
{
+ mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ mutex_unlock(&event_mutex);
destroy_local_trace_kprobe(p_event->tp_event);
}
{
if (str_has_prefix(type, "u"))
return false;
+ if (strcmp(type, "gfp_t") == 0)
+ return false;
return true;
}
return NULL;
}
-static __init int test_skb_segment(void)
+static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
{
+ unsigned int alloc_size = 2000;
+ unsigned int headroom = 102, doffset = 72, data_size = 1308;
+ struct sk_buff *skb[2];
+ int i;
+
+ /* skbs linked in a frag_list, both with linear data, with head_frag=0
+ * (data allocated by kmalloc), both have tcp data of 1308 bytes
+ * (total payload is 2616 bytes).
+ * Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr). Some headroom.
+ */
+ for (i = 0; i < 2; i++) {
+ skb[i] = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb[i]->protocol = htons(ETH_P_IPV6);
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], doffset + data_size);
+ skb_reset_network_header(skb[i]);
+ if (i == 0)
+ skb_reset_mac_header(skb[i]);
+ else
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+ __skb_pull(skb[i], doffset);
+ }
+
+ /* setup shinfo.
+ * mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a
+ * reduced gso_size.
+ */
+ skb_shinfo(skb[0])->gso_size = 1288;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ kfree_skb(skb[0]);
+err_skb0:
+ return NULL;
+}
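
For reference, the sizes are consistent: each buffer consumes headroom (102) +
doffset (72) + data_size (1308) = 1482 of its 2000 allocated bytes, and the two
skbs linked through frag_list together carry 2 * 1308 = 2616 bytes of TCP
payload, matching the comment above.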
+
+struct skb_segment_test {
+ const char *descr;
+ struct sk_buff *(*build_skb)(void);
netdev_features_t features;
+};
+
+static struct skb_segment_test skb_segment_tests[] __initconst = {
+ {
+ .descr = "gso_with_rx_frags",
+ .build_skb = build_test_skb,
+ .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM
+ },
+ {
+ .descr = "gso_linear_no_head_frag",
+ .build_skb = build_test_skb_linear_no_head_frag,
+ .features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
+ NETIF_F_LLTX | NETIF_F_GRO |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_STAG_TX
+ }
+};
+
+static __init int test_skb_segment_single(const struct skb_segment_test *test)
+{
struct sk_buff *skb, *segs;
int ret = -1;
- features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
- features |= NETIF_F_RXCSUM;
- skb = build_test_skb();
+ skb = test->build_skb();
if (!skb) {
pr_info("%s: failed to build_test_skb", __func__);
goto done;
}
- segs = skb_segment(skb, features);
+ segs = skb_segment(skb, test->features);
if (!IS_ERR(segs)) {
kfree_skb_list(segs);
ret = 0;
- pr_info("%s: success in skb_segment!", __func__);
- } else {
- pr_info("%s: failed in skb_segment!", __func__);
}
kfree_skb(skb);
done:
return ret;
}
+static __init int test_skb_segment(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ const struct skb_segment_test *test = &skb_segment_tests[i];
+
+ pr_info("#%d %s ", i, test->descr);
+
+ if (test_skb_segment_single(test)) {
+ pr_cont("FAIL\n");
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__,
+ pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
return -1;
}
- res->tv_sec = 0;
- res->tv_nsec = ns;
-
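+	/* POSIX allows clock_getres(clock, NULL), so res may legitimately be NULL */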
+ if (likely(res)) {
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
+ }
return 0;
}
ret = clock_getres_fallback(clock, &ts);
#endif
- if (likely(!ret)) {
+ if (likely(!ret && res)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
return ret ? -EFAULT : 0;
}
+static __always_inline long
+probe_write_common(void __user *dst, const void *src, size_t size)
+{
+ long ret;
+
+ pagefault_disable();
+ ret = __copy_to_user_inatomic(dst, src, size);
+ pagefault_enable();
+
+ return ret ? -EFAULT : 0;
+}
+
/**
* probe_kernel_read(): safely attempt to read from a kernel-space location
* @dst: pointer to the buffer that shall take the data
* do_page_fault() doesn't attempt to take mmap_sem. This makes
* probe_kernel_read() suitable for use within regions where the caller
* already holds mmap_sem, or other locks which nest inside mmap_sem.
+ *
+ * probe_kernel_read_strict() is the same as probe_kernel_read() except for
+ * the case where architectures have non-overlapping user and kernel address
+ * ranges: probe_kernel_read_strict() will additionally return -EFAULT when
+ * probing memory in a user address range, where probe_user_read() is
+ * supposed to be used instead.
*/
long __weak probe_kernel_read(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_read")));
+long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
long __probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
+
long __weak probe_kernel_write(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_write")));
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- pagefault_disable();
- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
- pagefault_enable();
+ ret = probe_write_common((__force void __user *)dst, src, size);
set_fs(old_fs);
- return ret ? -EFAULT : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(probe_kernel_write);
+/**
+ * probe_user_write(): safely attempt to write to a user-space location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_write(void __user *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_user_write")));
+
+long __probe_user_write(void __user *dst, const void *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(dst, size))
+ ret = probe_write_common(dst, src, size);
+ set_fs(old_fs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_write);
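
For callers, the semantics mirror probe_kernel_write(): pagefaults are disabled
around the copy, so a page that is absent or read-only yields -EFAULT instead
of being paged in. A hedged sketch (hypothetical caller; uptr is an untrusted
``void __user *``)::

    u32 val = 42;
    long err;

    err = probe_user_write(uptr, &val, sizeof(val));
    if (err)    /* -EFAULT: missing, read-only or out-of-range page */
        pr_debug("user write failed\n");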
/**
* strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
*
* If @count is smaller than the length of the string, copies @count-1 bytes,
* sets the last byte of @dst buffer to NUL and returns @count.
+ *
+ * strncpy_from_unsafe_strict() is the same as strncpy_from_unsafe() except
+ * for the case where architectures have non-overlapping user and kernel address
+ * ranges: strncpy_from_unsafe_strict() will additionally return -EFAULT when
+ * probing memory in a user address range, where strncpy_from_unsafe_user()
+ * is supposed to be used instead.
*/
-long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+
+long __weak strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+ __attribute__((alias("__strncpy_from_unsafe")));
+
+long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count)
+ __attribute__((alias("__strncpy_from_unsafe")));
+
+long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
mm_segment_t old_fs = get_fs();
const void *src = unsafe_addr;
if (err < 0)
goto out_uninit_mvrp;
- vlan->nest_level = dev_get_nest_level(real_dev) + 1;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_subclass)
-{
- lockdep_set_class_and_subclass(&txq->_xmit_lock,
- &vlan_netdev_xmit_lock_key,
- *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key,
- subclass);
- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
- return vlan_dev_priv(dev)->nest_level;
-}
-
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.parse = eth_header_parse,
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
- .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
.ndo_get_iflink = vlan_dev_get_iflink,
};
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* writable? */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
unsigned char *ogm_buff;
u32 random_seqno;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
- if (!ogm_buff)
+ if (!ogm_buff) {
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
return -ENOMEM;
+ }
hard_iface->bat_iv.ogm_buff = ogm_buff;
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
return 0;
}
static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
kfree(hard_iface->bat_iv.ogm_buff);
hard_iface->bat_iv.ogm_buff = NULL;
+
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+ void *ogm_buff;
- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
ether_addr_copy(batadv_ogm_packet->orig,
hard_iface->net_dev->dev_addr);
ether_addr_copy(batadv_ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void
batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+ void *ogm_buff;
- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
/* when do we schedule our own ogm to be sent */
}
}
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
u16 tvlv_len = 0;
unsigned long send_time;
- if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
- hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
- return;
+ lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
/* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
batadv_hardif_put(primary_if);
}
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+ return;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+ batadv_iv_ogm_schedule_buff(hard_iface);
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
/**
 * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
 * @orig_node: originator which rebroadcasted the OGMs directly
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/rculist.h>
}
/**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
*/
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
{
struct batadv_hard_iface *hard_iface;
- struct batadv_priv_bat_v *bat_v;
- struct batadv_priv *bat_priv;
struct batadv_ogm2_packet *ogm_packet;
struct sk_buff *skb, *skb_tmp;
unsigned char *ogm_buff;
u16 tvlv_len = 0;
int ret;
- bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
- bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+ lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
}
/**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+ struct batadv_priv_bat_v *bat_v;
+ struct batadv_priv *bat_priv;
+
+ bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+ bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+ batadv_v_ogm_send_softif(bat_priv);
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
* batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
* @work: work queue item
*
struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
struct batadv_ogm2_packet *ogm_packet;
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
if (!bat_priv->bat_v.ogm_buff)
- return;
+ goto unlock;
ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
/**
atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
+ mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
return 0;
}
{
cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
kfree(bat_priv->bat_v.ogm_buff);
bat_priv->bat_v.ogm_buff = NULL;
bat_priv->bat_v.ogm_buff_len = 0;
+
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
+ mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
spin_lock_init(&hard_iface->neigh_list_lock);
kref_init(&hard_iface->refcount);
return 0;
}
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
/**
* batadv_softif_init_late() - late stage initialization of soft interface
* @dev: registered network device to modify
int ret;
size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
- batadv_set_lockdep_class(dev);
-
bat_priv = netdev_priv(dev);
bat_priv->soft_iface = dev;
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/sched.h> /* for linux/wait.h */
/** @ogm_seqno: OGM sequence number - used to identify each OGM */
atomic_t ogm_seqno;
+
+ /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+ struct mutex ogm_buff_mutex;
};
/**
/** @ogm_seqno: OGM sequence number - used to identify each OGM */
atomic_t ogm_seqno;
+ /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+ struct mutex ogm_buff_mutex;
+
/** @ogm_wq: workqueue used to schedule OGM transmissions */
struct delayed_work ogm_wq;
};
return err < 0 ? NET_XMIT_DROP : err;
}
-static int bt_dev_init(struct net_device *dev)
-{
- netdev_lockdep_set_classes(dev);
-
- return 0;
-}
-
static const struct net_device_ops netdev_ops = {
- .ndo_init = bt_dev_init,
.ndo_start_xmit = bt_xmit,
};
if (sk->sk_state == BT_LISTEN)
return bt_accept_poll(sk);
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == BT_CLOSED)
const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
return NETDEV_TX_OK;
}
-static void br_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
}
- br_set_lockdep_class(dev);
return err;
}
}
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user)
+ const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
bool fdb_modified = false;
fdb->dst = source;
fdb_modified = true;
/* Take over HW learned entry */
- test_and_clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
- &fdb->flags);
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags)))
+ clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags);
}
if (now != fdb->updated)
fdb->updated = now;
- if (unlikely(added_by_user))
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
if (unlikely(fdb_modified)) {
- trace_br_fdb_update(br, source, addr, vid, added_by_user);
+ trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
}
} else {
spin_lock(&br->hash_lock);
- fdb = fdb_create(br, source, addr, vid, 0);
+ fdb = fdb_create(br, source, addr, vid, flags);
if (fdb) {
- if (unlikely(added_by_user))
- set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
- trace_br_fdb_update(br, source, addr, vid,
- added_by_user);
+ trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
/* else we lose race and someone else inserts
}
local_bh_disable();
rcu_read_lock();
- br_fdb_update(br, p, addr, vid, true);
+ br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
rcu_read_unlock();
local_bh_enable();
} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
fdb = br_fdb_find(br, addr, vid);
if (!fdb) {
- fdb = fdb_create(br, p, addr, vid, 0);
+ unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+ if (swdev_notify)
+ flags |= BIT(BR_FDB_ADDED_BY_USER);
+ fdb = fdb_create(br, p, addr, vid, flags);
if (!fdb) {
err = -ENOMEM;
goto err_unlock;
}
- if (swdev_notify)
- set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
fdb->updated = jiffies;
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
- br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
local_rcv = !!(br->dev->flags & IFF_PROMISC);
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
if ((p->flags & BR_LEARNING) &&
!br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
br_should_learn(p, skb, &vid))
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}
/* note: already called with rcu_read_lock */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user);
+ const unsigned char *addr, u16 vid, unsigned long flags);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid);
* This may also be a clone skbuff, we could preserve the geometry for
* the copies but probably not worth the effort.
*/
- ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+ ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
while (state.left > 0) {
struct sk_buff *skb2;
mask |= EPOLLRDHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
mask |= EPOLLIN | EPOLLRDNORM;
if (error)
goto out_err;
- if (sk->sk_receive_queue.prev != skb)
+ if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
goto out;
/* Socket shut down? */
break;
sk_busy_loop(sk, flags & MSG_DONTWAIT);
- } while (sk->sk_receive_queue.prev != *last);
+ } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
error = -EAGAIN;
mask = 0;
/* exceptional events? */
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
#include "net-sysfs.h"
#define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
- ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
- ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
- ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
- ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
- ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
- ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
- ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
- ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
- ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
- ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
- ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
- ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
- ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
- ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
- ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
- "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
- "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
- "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
- "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
- "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
- "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
- "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
- "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
- "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
- "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
- "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
- "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
- "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
- "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
- "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
- if (netdev_lock_type[i] == dev_type)
- return i;
- /* the last key is used by default */
- return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
-{
- int i;
-
- i = netdev_lock_pos(dev_type);
- lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
- netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
- int i;
-
- i = netdev_lock_pos(dev->type);
- lockdep_set_class_and_name(&dev->addr_list_lock,
- &netdev_addr_lock_key[i],
- netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
/*******************************************************************************
*
* Protocol management and registration routines
/* upper master flag, there can only be one master device per list */
bool master;
+ /* lookup ignore flag: when true, this adjacency is skipped by walks */
+ bool ignore;
+
/* counter for the number of times this device was added to us */
u16 ref_nr;
return NULL;
}
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
struct net_device *dev = data;
{
ASSERT_RTNL();
- return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+ return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev)
{
- return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+ return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+ struct netdev_adjacent *upper;
+
+ ASSERT_RTNL();
+
+ if (list_empty(&dev->adj_list.upper))
+ return NULL;
+
+ upper = list_first_entry(&dev->adj_list.upper,
+ struct netdev_adjacent, list);
+ if (likely(upper->master) && !upper->ignore)
+ return upper->dev;
+ return NULL;
+}
+
/**
* netdev_has_any_lower_dev - Check if device is linked to some device
* @dev: device
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+ struct list_head **iter,
+ bool *ignore)
+{
+ struct netdev_adjacent *upper;
+
+ upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+ if (&upper->list == &dev->adj_list.upper)
+ return NULL;
+
+ *iter = &upper->list;
+ *ignore = upper->ignore;
+
+ return upper->dev;
+}
+
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
struct list_head **iter)
{
return upper->dev;
}
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+ int (*fn)(struct net_device *dev,
+ void *data),
+ void *data)
+{
+ struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
+ bool ignore;
+
+ now = dev;
+ iter = &dev->adj_list.upper;
+
+ while (1) {
+ if (now != dev) {
+ ret = fn(now, data);
+ if (ret)
+ return ret;
+ }
+
+ next = NULL;
+ while (1) {
+ udev = __netdev_next_upper_dev(now, &iter, &ignore);
+ if (!udev)
+ break;
+ if (ignore)
+ continue;
+
+ next = udev;
+ niter = &udev->adj_list.upper;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return 0;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
+ }
+
+ return 0;
+}
+
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev,
void *data),
void *data)
{
- struct net_device *udev;
- struct list_head *iter;
- int ret;
+ struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
- for (iter = &dev->adj_list.upper,
- udev = netdev_next_upper_dev_rcu(dev, &iter);
- udev;
- udev = netdev_next_upper_dev_rcu(dev, &iter)) {
- /* first is the upper device itself */
- ret = fn(udev, data);
- if (ret)
- return ret;
+ now = dev;
+ iter = &dev->adj_list.upper;
- /* then look at all of its upper devices */
- ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
- if (ret)
- return ret;
+ while (1) {
+ if (now != dev) {
+ ret = fn(now, data);
+ if (ret)
+ return ret;
+ }
+
+ next = NULL;
+ while (1) {
+ udev = netdev_next_upper_dev_rcu(now, &iter);
+ if (!udev)
+ break;
+
+ next = udev;
+ niter = &udev->adj_list.upper;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return 0;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
}
return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
+static bool __netdev_has_upper_dev(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ ASSERT_RTNL();
+
+ return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+ upper_dev);
+}
+
/**
* netdev_lower_get_next_private - Get the next ->private from the
* lower neighbour list
return lower->dev;
}
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+ struct list_head **iter,
+ bool *ignore)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->adj_list.lower)
+ return NULL;
+
+ *iter = &lower->list;
+ *ignore = lower->ignore;
+
+ return lower->dev;
+}
+
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
void *data),
void *data)
{
- struct net_device *ldev;
- struct list_head *iter;
- int ret;
+ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
- for (iter = &dev->adj_list.lower,
- ldev = netdev_next_lower_dev(dev, &iter);
- ldev;
- ldev = netdev_next_lower_dev(dev, &iter)) {
- /* first is the lower device itself */
- ret = fn(ldev, data);
- if (ret)
- return ret;
+ now = dev;
+ iter = &dev->adj_list.lower;
- /* then look at all of its lower devices */
- ret = netdev_walk_all_lower_dev(ldev, fn, data);
- if (ret)
- return ret;
+ while (1) {
+ if (now != dev) {
+ ret = fn(now, data);
+ if (ret)
+ return ret;
+ }
+
+ next = NULL;
+ while (1) {
+ ldev = netdev_next_lower_dev(now, &iter);
+ if (!ldev)
+ break;
+
+ next = ldev;
+ niter = &ldev->adj_list.lower;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return 0;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
}
return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+ int (*fn)(struct net_device *dev,
+ void *data),
+ void *data)
+{
+ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
+ bool ignore;
+
+ now = dev;
+ iter = &dev->adj_list.lower;
+
+ while (1) {
+ if (now != dev) {
+ ret = fn(now, data);
+ if (ret)
+ return ret;
+ }
+
+ next = NULL;
+ while (1) {
+ ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+ if (!ldev)
+ break;
+ if (ignore)
+ continue;
+
+ next = ldev;
+ niter = &ldev->adj_list.lower;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return 0;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
+ }
+
+ return 0;
+}
+
static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter)
{
return lower->dev;
}
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
- int (*fn)(struct net_device *dev,
- void *data),
- void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+ struct net_device *udev;
+ struct list_head *iter;
+ u8 max_depth = 0;
+ bool ignore;
+
+ for (iter = &dev->adj_list.upper,
+ udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+ udev;
+ udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+ if (ignore)
+ continue;
+ if (max_depth < udev->upper_level)
+ max_depth = udev->upper_level;
+ }
+
+ return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
{
struct net_device *ldev;
struct list_head *iter;
- int ret;
+ u8 max_depth = 0;
+ bool ignore;
for (iter = &dev->adj_list.lower,
- ldev = netdev_next_lower_dev_rcu(dev, &iter);
+ ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
ldev;
- ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
- /* first is the lower device itself */
- ret = fn(ldev, data);
- if (ret)
- return ret;
+ ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+ if (ignore)
+ continue;
+ if (max_depth < ldev->lower_level)
+ max_depth = ldev->lower_level;
+ }
- /* then look at all of its lower devices */
- ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
- if (ret)
- return ret;
+ return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+ dev->upper_level = __netdev_upper_depth(dev) + 1;
+ return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+ dev->lower_level = __netdev_lower_depth(dev) + 1;
+ return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *dev,
+ void *data),
+ void *data)
+{
+ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
+
+ now = dev;
+ iter = &dev->adj_list.lower;
+
+ while (1) {
+ if (now != dev) {
+ ret = fn(now, data);
+ if (ret)
+ return ret;
+ }
+
+ next = NULL;
+ while (1) {
+ ldev = netdev_next_lower_dev_rcu(now, &iter);
+ if (!ldev)
+ break;
+
+ next = ldev;
+ niter = &ldev->adj_list.lower;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return 0;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
}
return 0;
adj->master = master;
adj->ref_nr = 1;
adj->private = private;
+ adj->ignore = false;
dev_hold(adj_dev);
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
return -EBUSY;
/* To prevent loops, check if dev is not upper device to upper_dev. */
- if (netdev_has_upper_dev(upper_dev, dev))
+ if (__netdev_has_upper_dev(upper_dev, dev))
return -EBUSY;
+ if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+ return -EMLINK;
+
if (!master) {
- if (netdev_has_upper_dev(dev, upper_dev))
+ if (__netdev_has_upper_dev(dev, upper_dev))
return -EEXIST;
} else {
- master_dev = netdev_master_upper_dev_get(dev);
+ master_dev = __netdev_master_upper_dev_get(dev);
if (master_dev)
return master_dev == upper_dev ? -EEXIST : -EBUSY;
}
if (ret)
goto rollback;
+ __netdev_update_upper_level(dev, NULL);
+ __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+ __netdev_update_lower_level(upper_dev, NULL);
+ __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+ NULL);
+
return 0;
rollback:
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
&changeupper_info.info);
+
+ __netdev_update_upper_level(dev, NULL);
+ __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+ __netdev_update_lower_level(upper_dev, NULL);
+ __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+ NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+ struct net_device *lower_dev,
+ bool val)
+{
+ struct netdev_adjacent *adj;
+
+ adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+ if (adj)
+ adj->ignore = val;
+
+ adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+ if (adj)
+ adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+ struct net_device *lower_dev)
+{
+ __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+ struct net_device *lower_dev)
+{
+ __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!new_dev)
+ return 0;
+
+ if (old_dev && new_dev != old_dev)
+ netdev_adjacent_dev_disable(dev, old_dev);
+
+ err = netdev_upper_dev_link(new_dev, dev, extack);
+ if (err) {
+ if (old_dev && new_dev != old_dev)
+ netdev_adjacent_dev_enable(dev, old_dev);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev)
+{
+ if (!new_dev || !old_dev)
+ return;
+
+ if (new_dev == old_dev)
+ return;
+
+ netdev_adjacent_dev_enable(dev, old_dev);
+ netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev)
+{
+ if (!new_dev)
+ return;
+
+ if (old_dev && new_dev != old_dev)
+ netdev_adjacent_dev_enable(dev, old_dev);
+
+ netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
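
The three functions above form a prepare/commit/abort sequence for replacing
one linked lower device with another (for example, switching a bond or team to
a new active slave without tripping the -EEXIST/-EBUSY checks). A sketch of
the intended calling pattern (hypothetical caller)::

    err = netdev_adjacent_change_prepare(old_dev, new_dev, upper, extack);
    if (err)    /* old adjacency is re-enabled internally on failure */
        return err;

    /* ... repoint the upper device from old_dev to new_dev ... */

    if (success)
        netdev_adjacent_change_commit(old_dev, new_dev, upper);  /* unlink old */
    else
        netdev_adjacent_change_abort(old_dev, new_dev, upper);   /* unlink new */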
+
/**
* netdev_bonding_info_change - Dispatch event about slave change
* @dev: device
EXPORT_SYMBOL(netdev_lower_dev_get_private);
-int dev_get_nest_level(struct net_device *dev)
-{
- struct net_device *lower = NULL;
- struct list_head *iter;
- int max_nest = -1;
- int nest;
-
- ASSERT_RTNL();
-
- netdev_for_each_lower_dev(dev, lower, iter) {
- nest = dev_get_nest_level(lower);
- if (max_nest < nest)
- max_nest = nest;
- }
-
- return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
/**
* netdev_lower_change - Dispatch event about lower device state change
* @lower_dev: device
return -EINVAL;
}
- if (prog->aux->id == prog_id) {
+ /* prog->aux->id may be 0 for orphaned device-bound progs */
+ if (prog->aux->id && prog->aux->id == prog_id) {
bpf_prog_put(prog);
return 0;
}
{
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
- netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+ lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
queue->xmit_lock_owner = -1;
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+ lockdep_register_key(&dev->qdisc_tx_busylock_key);
+ lockdep_register_key(&dev->qdisc_running_key);
+ lockdep_register_key(&dev->qdisc_xmit_lock_key);
+ lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+ lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+ lockdep_unregister_key(&dev->qdisc_running_key);
+ lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+ lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+ struct netdev_queue *queue;
+ int i;
+
+ lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+ lockdep_unregister_key(&dev->addr_list_lock_key);
+
+ lockdep_register_key(&dev->qdisc_xmit_lock_key);
+ lockdep_register_key(&dev->addr_list_lock_key);
+
+ lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ queue = netdev_get_tx_queue(dev, i);
+
+ lockdep_set_class(&queue->_xmit_lock,
+ &dev->qdisc_xmit_lock_key);
+ }
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
/**
* register_netdevice - register a network device
* @dev: device to register
BUG_ON(!net);
spin_lock_init(&dev->addr_list_lock);
- netdev_set_addr_lockdep_class(dev);
+ lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
ret = dev_get_valid_name(net, dev, dev->name);
if (ret < 0)
dev_net_set(dev, &init_net);
+ netdev_register_lockdep_key(dev);
+
dev->gso_max_size = GSO_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
+ dev->upper_level = 1;
+ dev->lower_level = 1;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;
+ netdev_unregister_lockdep_key(dev);
+
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED) {
netdev_freemem(dev);
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();
- new_nsid = peernet2id_alloc(dev_net(dev), net);
+ new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
/* If there is an ifindex conflict assign a new one */
if (__dev_get_by_index(net, dev->ifindex))
new_ifindex = dev_new_index(net);
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
return;
netif_addr_lock_bh(from);
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
return;
netif_addr_lock_bh(from);
- netif_addr_lock_nested(to);
+ netif_addr_lock(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol;
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
+ memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+ wol.cmd = ETHTOOL_GWOL;
dev->ethtool_ops->get_wol(dev, &wol);
if (copy_to_user(useraddr, &wol, sizeof(wol)))
}
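Why the memset above: a designated initializer zeroes the remaining named members, but the C standard leaves structure padding unspecified, so uninitialised stack bytes could reach userspace via copy_to_user(). The explicit form guarantees every byte of the struct is zeroed::

    struct ethtool_wolinfo wol;

    memset(&wol, 0, sizeof(wol));       /* clears padding bytes as well */
    wol.cmd = ETHTOOL_GWOL;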
EXPORT_SYMBOL(__skb_flow_dissect);
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
net_get_random_once(&hashrnd, sizeof(hashrnd));
}
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
- u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
{
- return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
- const void *p = flow;
-
- BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
- return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+ BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+ return &flow->FLOW_KEYS_HASH_START_FIELD;
}
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+
BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
- /* flow.addrs MUST be the last member in struct flow_keys because
- * different L3 protocols have different address length
- */
- BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
- sizeof(*flow) - sizeof(flow->addrs));
switch (flow->control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
diff -= sizeof(flow->addrs.tipckey);
break;
}
- return (sizeof(*flow) - diff) / sizeof(u32);
+ return sizeof(*flow) - diff;
}
__be32 flow_get_u32_src(const struct flow_keys *flow)
}
}
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+ const siphash_key_t *keyval)
{
u32 hash;
__flow_hash_consistentify(keys);
- hash = __flow_hash_words(flow_keys_hash_start(keys),
- flow_keys_hash_length(keys), keyval);
+ hash = siphash(flow_keys_hash_start(keys),
+ flow_keys_hash_length(keys), keyval);
if (!hash)
hash = 1;
u32 flow_hash_from_keys(struct flow_keys *keys)
{
__flow_hash_secret_init();
- return __flow_hash_from_keys(keys, hashrnd);
+ return __flow_hash_from_keys(keys, &hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
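For reference, the keyed-hash pattern the flow dissector switches to, as an isolated sketch (demo_key and demo_hash are illustrative names)::

    #include <linux/siphash.h>

    static siphash_key_t demo_key __read_mostly;

    static u32 demo_hash(const void *data, size_t len)
    {
            net_get_random_once(&demo_key, sizeof(demo_key));
            /* siphash() returns u64; the callers here keep the low 32 bits */
            return siphash(data, len, &demo_key);
    }

Unlike jhash2(), the length is given in bytes and the input must satisfy SIPHASH_ALIGNMENT, which is what the BUILD_BUG_ON() in flow_keys_hash_start() now asserts.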
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
- struct flow_keys *keys, u32 keyval)
+ struct flow_keys *keys,
+ const siphash_key_t *keyval)
{
skb_flow_dissect_flow_keys(skb, keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
&keys, NULL, 0, 0, 0,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
- return __flow_hash_from_keys(&keys, hashrnd);
+ return __flow_hash_from_keys(&keys, &hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
__flow_hash_secret_init();
- hash = ___skb_get_hash(skb, &keys, hashrnd);
+ hash = ___skb_get_hash(skb, &keys, &hashrnd);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb)
{
struct flow_keys keys;
int err = -EINVAL;
if (skb->protocol == htons(ETH_P_IP)) {
+ struct net_device *dev = skb_dst(skb)->dev;
struct iphdr *iph = ip_hdr(skb);
+ dev_hold(dev);
+ skb_dst_drop(skb);
err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb_dst(skb)->dev);
+ iph->tos, dev);
+ dev_put(dev);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
+ skb_dst_drop(skb);
err = ipv6_stub->ipv6_route_input(skb);
} else {
err = -EAFNOSUPPORT;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
-int peernet2id_alloc(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
bool alloc = false, alive = false;
int id;
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
- rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
+ rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
if (alive)
put_net(peer);
return id;
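Why the gfp parameter: the id-allocation notification can now be triggered from contexts that must not sleep, so each caller picks the allocation mode::

    /* process context, may sleep */
    id = peernet2id_alloc(net, peer, GFP_KERNEL);

    /* atomic context, e.g. the RCU-protected OVS dump path later on */
    id = peernet2id_alloc(net, peer, GFP_ATOMIC);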
if (rv < 0) {
put_userns:
+ key_remove_domain(net->key_domain);
put_user_ns(user_ns);
net_drop_ns(net);
dec_ucounts:
idr_remove(&tmp->netns_ids, id);
spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
- rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+ GFP_KERNEL);
if (tmp == last)
break;
}
spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
- nlh);
+ nlh, GFP_KERNEL);
err = 0;
} else if (err == -ENOSPC && nsid >= 0) {
err = -EEXIST;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, gfp_t gfp)
{
struct net_fill_args fillargs = {
.portid = portid,
struct sk_buff *msg;
int err = -ENOMEM;
- msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+ msg = nlmsg_new(rtnl_net_get_size(), gfp);
if (!msg)
goto out;
if (err < 0)
goto err_out;
- rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
+ rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
return;
err_out:
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
const struct net_device *dev,
- struct net *src_net)
+ struct net *src_net, gfp_t gfp)
{
bool put_iflink = false;
struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
if (!net_eq(dev_net(dev), link_net)) {
- int id = peernet2id_alloc(src_net, link_net);
+ int id = peernet2id_alloc(src_net, link_net, gfp);
if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
return -EMSGSIZE;
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask,
u32 event, int *new_nsid, int new_ifindex,
- int tgt_netnsid)
+ int tgt_netnsid, gfp_t gfp)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
goto nla_put_failure;
}
- if (rtnl_fill_link_netnsid(skb, dev, src_net))
+ if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
goto nla_put_failure;
if (new_nsid &&
NETLINK_CB(cb->skb).portid,
nlh->nlmsg_seq, 0, flags,
ext_filter_mask, 0, NULL, 0,
- netnsid);
+ netnsid, GFP_KERNEL);
if (err < 0) {
if (likely(skb->len))
err = ops->ndo_del_slave(upper_dev, dev);
if (err)
return err;
+ netdev_update_lockdep_key(dev);
} else {
return -EOPNOTSUPP;
}
err = rtnl_fill_ifinfo(nskb, dev, net,
RTM_NEWLINK, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, 0, 0, ext_filter_mask,
- 0, NULL, 0, netnsid);
+ 0, NULL, 0, netnsid, GFP_KERNEL);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
type, 0, 0, change, 0, 0, event,
- new_nsid, new_ifindex, -1);
+ new_nsid, new_ifindex, -1, flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
ndm = nlmsg_data(nlh);
if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
ndm->ndm_flags || ndm->ndm_type) {
- NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+ NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
return -EINVAL;
}
break;
}
case SO_INCOMING_CPU:
- sk->sk_incoming_cpu = val;
+ WRITE_ONCE(sk->sk_incoming_cpu, val);
break;
case SO_CNX_ADVICE:
break;
case SO_INCOMING_CPU:
- v.val = sk->sk_incoming_cpu;
+ v.val = READ_ONCE(sk->sk_incoming_cpu);
break;
case SO_MEMINFO:
{
struct sock *sk = p;
- return !skb_queue_empty(&sk->sk_receive_queue) ||
+ return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
- inet->inet_id = dp->dccps_iss ^ jiffies;
+ inet->inet_id = prandom_u32();
err = dccp_connect(sk);
rt = NULL;
struct dn_scp *scp = DN_SK(sk);
__poll_t mask = datagram_poll(file, sock, wait);
- if (!skb_queue_empty(&scp->other_receive_queue))
+ if (!skb_queue_empty_lockless(&scp->other_receive_queue))
mask |= EPOLLRDBAND;
return mask;
rtnl_unlock();
}
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int ret;
wmb();
dev->dsa_ptr = cpu_dp;
- lockdep_set_class(&dev->addr_list_lock,
- &dsa_master_addr_list_lock_key);
-
ret = dsa_master_ethtool_setup(dev);
if (ret)
return ret;
return ret;
}
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &dsa_slave_netdev_xmit_lock_key);
-}
-
int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
slave_dev->max_mtu = ETH_MAX_MTU;
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
- netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
- NULL);
-
SET_NETDEV_DEV(slave_dev, port->ds->dev);
slave_dev->dev.of_node = port->dn;
slave_dev->vlan_features = master->vlan_features;
.create = lowpan_header_create,
};
-static int lowpan_dev_init(struct net_device *ldev)
-{
- netdev_lockdep_set_classes(ldev);
-
- return 0;
-}
-
static int lowpan_open(struct net_device *dev)
{
if (!open_count)
}
static const struct net_device_ops lowpan_netdev_ops = {
- .ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
.ndo_open = lowpan_open,
.ndo_stop = lowpan_stop,
reuseport_has_conns(sk, true);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
- inet->inet_id = jiffies;
+ inet->inet_id = prandom_u32();
sk_dst_set(sk, &rt->dst);
err = 0;
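All the inet_id changes throughout this patch share one rationale: an IP ID counter seeded from jiffies or a sequence number is predictable to an off-path observer, so every initialisation becomes a fresh random draw::

    inet->inet_id = prandom_u32();      /* was e.g. tp->write_seq ^ jiffies */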
if (!(dev->flags & IFF_UP) ||
ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
ipv4_is_zeronet(prefix) ||
- prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+ (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
return;
/* add the new */
dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
- saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+ saddr = inet_select_addr(dev, iph->saddr,
+ RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
return -1;
score = sk->sk_family == PF_INET ? 2 : 1;
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
+ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
}
return score;
key = &tun_info->key;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
goto err_free_skb;
- md = ip_tunnel_info_opts(tun_info);
- if (!md)
+ if (tun_info->options_len < sizeof(*md))
goto err_free_skb;
+ md = ip_tunnel_info_opts(tun_info);
/* ERSPAN has fixed 8 byte GRE header */
version = md->version;
EXPORT_SYMBOL(ip_fraglist_prepare);
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
- unsigned int ll_rs, unsigned int mtu,
+ unsigned int ll_rs, unsigned int mtu, bool DF,
struct ip_frag_state *state)
{
struct iphdr *iph = ip_hdr(skb);
+ state->DF = DF;
state->hlen = hlen;
state->ll_rs = ll_rs;
state->mtu = mtu;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
- if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
- state->iph->frag_off |= htons(IP_DF);
-
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((state->offset >> 3));
+ if (state->DF)
+ iph->frag_off |= htons(IP_DF);
/*
* Added AC : If we are fragmenting a fragment that's not the
* Fragment the datagram.
*/
- ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+ ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+ &state);
/*
* Keep copying data until we run out.
}
/* This barrier is coupled with smp_wmb() in tcp_reset() */
smp_rmb();
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR;
return mask;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
- if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+ if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
(sk->sk_state == TCP_ESTABLISHED))
sk_busy_loop(sk, nonblock);
inet->inet_daddr);
}
- inet->inet_id = tp->write_seq ^ jiffies;
+ inet->inet_id = prandom_u32();
if (tcp_fastopen_defer_connect(sk, &err))
return err;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
- newinet->inet_id = newtp->write_seq ^ jiffies;
+ newinet->inet_id = prandom_u32();
if (!dst) {
dst = inet_csk_route_child_sock(sk, newsk, req);
net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
- net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+ net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
net->ipv4.sysctl_tcp_sack = 1;
net->ipv4.sysctl_tcp_window_scaling = 1;
net->ipv4.sysctl_tcp_timestamps = 1;
return -1;
score += 4;
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
+ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
return score;
}
scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
+static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
+{
+ /* We come here after udp_lib_checksum_complete() returned 0.
+ * This means that __skb_checksum_complete() might have
+ * set skb->csum_valid to 1.
+ * On 64bit platforms, we can set csum_unnecessary
+ * to true, but only if the skb is not shared.
+ */
+#if BITS_PER_LONG == 64
+ if (!skb_shared(skb))
+ udp_skb_scratch(skb)->csum_unnecessary = true;
+#endif
+}
+
static int udp_skb_truesize(struct sk_buff *skb)
{
return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
*total += skb->truesize;
kfree_skb(skb);
} else {
- /* the csum related bits could be changed, refresh
- * the scratch area
- */
- udp_set_dev_scratch(skb);
+ udp_skb_csum_unnecessary_set(skb);
break;
}
}
spin_lock_bh(&rcvq->lock);
skb = __first_packet_length(sk, rcvq, &total);
- if (!skb && !skb_queue_empty(sk_queue)) {
+ if (!skb && !skb_queue_empty_lockless(sk_queue)) {
spin_lock(&sk_queue->lock);
skb_queue_splice_tail_init(sk_queue, rcvq);
spin_unlock(&sk_queue->lock);
return skb;
}
- if (skb_queue_empty(sk_queue)) {
+ if (skb_queue_empty_lockless(sk_queue)) {
spin_unlock_bh(&queue->lock);
goto busy_check;
}
break;
sk_busy_loop(sk, flags & MSG_DONTWAIT);
- } while (!skb_queue_empty(sk_queue));
+ } while (!skb_queue_empty_lockless(sk_queue));
/* sk_queue is empty, reader_queue may contain peeked packets */
} while (timeo &&
__poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
- if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+ if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Check for false positives due to checksum errors */
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
+#include <net/addrconf.h>
#include <net/ip.h>
/* if ipv6 module registers this function is used by xfrm to force all
mip6_addr_swap(skb);
+ sk = icmpv6_xmit_lock(net);
+ if (!sk)
+ goto out_bh_enable;
+
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.daddr = hdr->saddr;
if (force_saddr)
saddr = force_saddr;
- if (saddr)
+ if (saddr) {
fl6.saddr = *saddr;
+ } else {
+		/* select a more meaningful saddr from the input interface */
+ struct net_device *in_netdev;
+
+ in_netdev = dev_get_by_index(net, IP6CB(skb)->iif);
+ if (in_netdev) {
+ ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr,
+ inet6_sk(sk)->srcprefs,
+ &fl6.saddr);
+ dev_put(in_netdev);
+ }
+ }
fl6.flowi6_mark = mark;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
- sk = icmpv6_xmit_lock(net);
- if (!sk)
- goto out_bh_enable;
-
sk->sk_mark = mark;
np = inet6_sk(sk);
return -1;
score = 1;
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
+ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
}
return score;
dsfield = key->tos;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
goto tx_err;
- md = ip_tunnel_info_opts(tun_info);
- if (!md)
+ if (tun_info->options_len < sizeof(*md))
goto tx_err;
+ md = ip_tunnel_info_opts(tun_info);
tun_id = tunnel_id_to_key32(key->tun_id);
if (md->version == 1) {
return -1;
score++;
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
+ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
return score;
{
eth_hw_addr_random(dev);
eth_broadcast_addr(dev->broadcast);
- netdev_lockdep_set_classes(dev);
return 0;
}
mutex_lock(&__ip_vs_app_mutex);
+ /* increase the module use count */
+ if (!ip_vs_use_count_inc()) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
list_for_each_entry(a, &ipvs->app_list, a_list) {
if (!strcmp(app->name, a->name)) {
err = -EEXIST;
+ /* decrease the module use count */
+ ip_vs_use_count_dec();
goto out_unlock;
}
}
a = kmemdup(app, sizeof(*app), GFP_KERNEL);
if (!a) {
err = -ENOMEM;
+ /* decrease the module use count */
+ ip_vs_use_count_dec();
goto out_unlock;
}
INIT_LIST_HEAD(&a->incs_list);
list_add(&a->a_list, &ipvs->app_list);
- /* increase the module use count */
- ip_vs_use_count_inc();
out_unlock:
mutex_unlock(&__ip_vs_app_mutex);
static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
- static int old_secure_tcp = 0;
int availmem;
int nomem;
int to_change = -1;
spin_lock(&ipvs->securetcp_lock);
switch (ipvs->sysctl_secure_tcp) {
case 0:
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
ipvs->sysctl_secure_tcp = 2;
} else {
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
} else {
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
break;
}
- old_secure_tcp = ipvs->sysctl_secure_tcp;
+ ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(ipvs,
ipvs->sysctl_secure_tcp > 1);
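The bug class being fixed, sketched: a function-local static is a single machine-wide instance, so two network namespaces calling update_defense_level() would clobber each other's state. The fix moves the field into the per-namespace structure::

    /* in struct netns_ipvs (include/net/ip_vs.h): */
    int old_secure_tcp;                 /* replaces the function-local static */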
struct ip_vs_service *svc = NULL;
/* increase the module use count */
- ip_vs_use_count_inc();
+ if (!ip_vs_use_count_inc())
+ return -ENOPROTOOPT;
/* Lookup the scheduler by 'u->sched_name' */
if (strcmp(u->sched_name, "none")) {
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
- /* increase the module use count */
- ip_vs_use_count_inc();
-
/* Handle daemons since they have another lock */
if (cmd == IP_VS_SO_SET_STARTDAEMON ||
cmd == IP_VS_SO_SET_STOPDAEMON) {
ret = -EINVAL;
if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn)) <= 0)
- goto out_dec;
+ return ret;
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
ret = stop_sync_thread(ipvs, dm->state);
}
- goto out_dec;
+ return ret;
}
mutex_lock(&__ip_vs_mutex);
out_unlock:
mutex_unlock(&__ip_vs_mutex);
- out_dec:
- /* decrease the module use count */
- ip_vs_use_count_dec();
-
return ret;
}
struct ip_vs_pe *tmp;
/* increase the module use count */
- ip_vs_use_count_inc();
+ if (!ip_vs_use_count_inc())
+ return -ENOENT;
mutex_lock(&ip_vs_pe_mutex);
/* Make sure that the pe with this name doesn't exist
}
/* increase the module use count */
- ip_vs_use_count_inc();
+ if (!ip_vs_use_count_inc())
+ return -ENOENT;
mutex_lock(&ip_vs_sched_mutex);
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+ /* increase the module use count */
+ if (!ip_vs_use_count_inc())
+ return -ENOPROTOOPT;
+
/* Do not hold one mutex and then to block on another */
for (;;) {
rtnl_lock();
mutex_unlock(&ipvs->sync_mutex);
rtnl_unlock();
- /* increase the module use count */
- ip_vs_use_count_inc();
-
return 0;
out:
}
kfree(ti);
}
+
+ /* decrease the module use count */
+ ip_vs_use_count_dec();
return result;
out_early:
mutex_unlock(&ipvs->sync_mutex);
rtnl_unlock();
+
+ /* decrease the module use count */
+ ip_vs_use_count_dec();
return result;
}
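All the ip_vs_use_count_inc() moves above follow one shape: take the module reference before the object becomes reachable, and drop it on every failure path. A minimal sketch, with do_register() standing in for the real registration step::

    if (!ip_vs_use_count_inc())
            return -ENOENT;

    err = do_register(obj);             /* hypothetical registration */
    if (err) {
            ip_vs_use_count_dec();
            return err;
    }
    return 0;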
{
int err;
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
nf_flow_offload_rhash_params);
return err;
}
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
policy = nft_trans_chain_policy(trans);
err = nft_flow_offload_chain(trans->ctx.chain, &policy,
- FLOW_BLOCK_BIND);
+ FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_NEWRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
switch (priv->offset) {
case offsetof(struct ethhdr, h_source):
+ if (priv->len != ETH_ALEN)
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
src, ETH_ALEN, reg);
break;
case offsetof(struct ethhdr, h_dest):
+ if (priv->len != ETH_ALEN)
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
dst, ETH_ALEN, reg);
break;
+ default:
+ return -EOPNOTSUPP;
}
return 0;
switch (priv->offset) {
case offsetof(struct iphdr, saddr):
+ if (priv->len != sizeof(struct in_addr))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
sizeof(struct in_addr), reg);
break;
case offsetof(struct iphdr, daddr):
+ if (priv->len != sizeof(struct in_addr))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
sizeof(struct in_addr), reg);
break;
case offsetof(struct iphdr, protocol):
+ if (priv->len != sizeof(__u8))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
switch (priv->offset) {
case offsetof(struct ipv6hdr, saddr):
+ if (priv->len != sizeof(struct in6_addr))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
sizeof(struct in6_addr), reg);
break;
case offsetof(struct ipv6hdr, daddr):
+ if (priv->len != sizeof(struct in6_addr))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
sizeof(struct in6_addr), reg);
break;
case offsetof(struct ipv6hdr, nexthdr):
+ if (priv->len != sizeof(__u8))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
switch (priv->offset) {
case offsetof(struct tcphdr, source):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
sizeof(__be16), reg);
break;
case offsetof(struct tcphdr, dest):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
sizeof(__be16), reg);
break;
switch (priv->offset) {
case offsetof(struct udphdr, source):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
sizeof(__be16), reg);
break;
case offsetof(struct udphdr, dest):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
sizeof(__be16), reg);
break;
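Every hunk above enforces the same rule: the flow-dissector offload can only express a match on a whole header field, so a load whose length differs from the field size must be rejected instead of being offloaded as a narrower or wider match. The generic shape, with foohdr as an illustrative header::

    case offsetof(struct foohdr, field):
            if (priv->len != sizeof(__be16))        /* exact field size */
                    return -EOPNOTSUPP;
            /* ... emit the dissector match ... */
            break;
    default:
            return -EOPNOTSUPP;                     /* unknown offset */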
static const struct proto_ops nr_proto_ops;
/*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
-/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
free_netdev(dev);
goto fail;
}
- nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
if (sk->sk_state == LLCP_LISTEN)
return llcp_accept_poll(sk);
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == LLCP_CLOSED)
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
- flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
+ flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
+ &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
return 0;
}
+static int ovs_dp_stats_init(struct datapath *dp)
+{
+ dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
+ if (!dp->stats_percpu)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ovs_dp_vport_init(struct datapath *dp)
+{
+ int i;
+
+ dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
+ sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!dp->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(&dp->ports[i]);
+
+ return 0;
+}
+
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct datapath *dp;
struct vport *vport;
struct ovs_net *ovs_net;
- int err, i;
+ int err;
err = -EINVAL;
if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
err = -ENOMEM;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
- goto err_free_reply;
+ goto err_destroy_reply;
ovs_dp_set_net(dp, sock_net(skb->sk));
/* Allocate table. */
err = ovs_flow_tbl_init(&dp->table);
if (err)
- goto err_free_dp;
+ goto err_destroy_dp;
- dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
- if (!dp->stats_percpu) {
- err = -ENOMEM;
+ err = ovs_dp_stats_init(dp);
+ if (err)
goto err_destroy_table;
- }
-
- dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
- sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!dp->ports) {
- err = -ENOMEM;
- goto err_destroy_percpu;
- }
- for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
- INIT_HLIST_HEAD(&dp->ports[i]);
+ err = ovs_dp_vport_init(dp);
+ if (err)
+ goto err_destroy_stats;
err = ovs_meters_init(dp);
if (err)
- goto err_destroy_ports_array;
+ goto err_destroy_ports;
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
ovs_dp_reset_user_features(skb, info);
}
+ ovs_unlock();
goto err_destroy_meters;
}
return 0;
err_destroy_meters:
- ovs_unlock();
ovs_meters_exit(dp);
-err_destroy_ports_array:
+err_destroy_ports:
kfree(dp->ports);
-err_destroy_percpu:
+err_destroy_stats:
free_percpu(dp->stats_percpu);
err_destroy_table:
ovs_flow_tbl_destroy(&dp->table);
-err_free_dp:
+err_destroy_dp:
kfree(dp);
-err_free_reply:
+err_destroy_reply:
kfree_skb(reply);
err:
return err;
/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
struct net *net, u32 portid, u32 seq,
- u32 flags, u8 cmd)
+ u32 flags, u8 cmd, gfp_t gfp)
{
struct ovs_header *ovs_header;
struct ovs_vport_stats vport_stats;
goto nla_put_failure;
if (!net_eq(net, dev_net(vport->dev))) {
- int id = peernet2id_alloc(net, dev_net(vport->dev));
+ int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
goto nla_put_failure;
struct sk_buff *skb;
int retval;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
- retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+ retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+ GFP_KERNEL);
BUG_ON(retval < 0);
return skb;
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_NEW);
+ OVS_VPORT_CMD_NEW, GFP_KERNEL);
new_headroom = netdev_get_fwd_headroom(vport->dev);
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_SET);
+ OVS_VPORT_CMD_SET, GFP_KERNEL);
BUG_ON(err < 0);
ovs_unlock();
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_DEL);
+ OVS_VPORT_CMD_DEL, GFP_KERNEL);
BUG_ON(err < 0);
/* the vport deletion may trigger dp headroom update */
goto exit_unlock_free;
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_GET);
+ OVS_VPORT_CMD_GET, GFP_ATOMIC);
BUG_ON(err < 0);
rcu_read_unlock();
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
- OVS_VPORT_CMD_GET) < 0)
+ OVS_VPORT_CMD_GET,
+ GFP_ATOMIC) < 0)
goto out;
j++;
struct sw_flow_mask {
int ref_count;
struct rcu_head rcu;
- struct list_head list;
struct sw_flow_key_range range;
struct sw_flow_key key;
};
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS 1024
+#define MASK_ARRAY_SIZE_MIN 16
#define REHASH_INTERVAL (10 * 60 * HZ)
+#define MC_HASH_SHIFT 8
+#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
return ti;
}
+static struct mask_array *tbl_mask_array_alloc(int size)
+{
+ struct mask_array *new;
+
+ size = max(MASK_ARRAY_SIZE_MIN, size);
+ new = kzalloc(sizeof(struct mask_array) +
+ sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->count = 0;
+ new->max = size;
+
+ return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+ struct mask_array *old;
+ struct mask_array *new;
+
+ new = tbl_mask_array_alloc(size);
+ if (!new)
+ return -ENOMEM;
+
+ old = ovsl_dereference(tbl->mask_array);
+ if (old) {
+ int i;
+
+ for (i = 0; i < old->max; i++) {
+ if (ovsl_dereference(old->masks[i]))
+ new->masks[new->count++] = old->masks[i];
+ }
+ }
+
+ rcu_assign_pointer(tbl->mask_array, new);
+ kfree_rcu(old, rcu);
+
+ return 0;
+}
+
+static int tbl_mask_array_add_mask(struct flow_table *tbl,
+ struct sw_flow_mask *new)
+{
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int err, ma_count = READ_ONCE(ma->count);
+
+ if (ma_count >= ma->max) {
+ err = tbl_mask_array_realloc(tbl, ma->max +
+ MASK_ARRAY_SIZE_MIN);
+ if (err)
+ return err;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ }
+
+ BUG_ON(ovsl_dereference(ma->masks[ma_count]));
+
+ rcu_assign_pointer(ma->masks[ma_count], new);
+ WRITE_ONCE(ma->count, ma_count +1);
+
+ return 0;
+}
+
+static void tbl_mask_array_del_mask(struct flow_table *tbl,
+ struct sw_flow_mask *mask)
+{
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i, ma_count = READ_ONCE(ma->count);
+
+ /* Remove the deleted mask pointers from the array */
+ for (i = 0; i < ma_count; i++) {
+ if (mask == ovsl_dereference(ma->masks[i]))
+ goto found;
+ }
+
+ BUG();
+ return;
+
+found:
+	WRITE_ONCE(ma->count, ma_count - 1);
+
+	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
+
+ kfree_rcu(mask, rcu);
+
+ /* Shrink the mask array if necessary. */
+ if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
+ ma_count <= (ma->max / 3))
+ tbl_mask_array_realloc(tbl, ma->max / 2);
+}
+
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+ if (mask) {
+ /* ovs-lock is required to protect mask-refcount and
+ * mask list.
+ */
+ ASSERT_OVSL();
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count)
+ tbl_mask_array_del_mask(tbl, mask);
+ }
+}
+
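The reader side these WRITE_ONCE()/rcu_assign_pointer() pairs are designed for, sketched (use_mask() is a hypothetical consumer)::

    struct mask_array *ma = rcu_dereference(tbl->mask_array);
    int i, count = READ_ONCE(ma->count);

    for (i = 0; i < count; i++) {
            struct sw_flow_mask *m = rcu_dereference(ma->masks[i]);

            if (m)
                    use_mask(m);
    }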
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti, *ufid_ti;
+ struct mask_array *ma;
- ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+ MC_HASH_ENTRIES,
+ __alignof__(struct mask_cache_entry));
+ if (!table->mask_cache)
+ return -ENOMEM;
+ ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+ if (!ma)
+ goto free_mask_cache;
+
+ ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
- return -ENOMEM;
+ goto free_mask_array;
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
- INIT_LIST_HEAD(&table->mask_list);
+ rcu_assign_pointer(table->mask_array, ma);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
free_ti:
__table_instance_destroy(ti);
+free_mask_array:
+ kfree(ma);
+free_mask_cache:
+ free_percpu(table->mask_cache);
return -ENOMEM;
}
__table_instance_destroy(ti);
}
-static void table_instance_destroy(struct table_instance *ti,
+static void table_instance_flow_free(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti,
+ struct sw_flow *flow,
+ bool count)
+{
+ hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
+ if (count)
+ table->count--;
+
+ if (ovs_identifier_is_ufid(&flow->id)) {
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
+
+ if (count)
+ table->ufid_count--;
+ }
+
+ flow_mask_remove(table, flow->mask);
+}
+
+static void table_instance_destroy(struct flow_table *table,
+ struct table_instance *ti,
struct table_instance *ufid_ti,
bool deferred)
{
struct sw_flow *flow;
struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
- int ver = ti->node_ver;
- int ufid_ver = ufid_ti->node_ver;
- hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
- hlist_del_rcu(&flow->flow_table.node[ver]);
- if (ovs_identifier_is_ufid(&flow->id))
- hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
+ hlist_for_each_entry_safe(flow, n, head,
+ flow_table.node[ti->node_ver]) {
+
+ table_instance_flow_free(table, ti, ufid_ti,
+ flow, false);
ovs_flow_free(flow, deferred);
}
}
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
- table_instance_destroy(ti, ufid_ti, false);
+ free_percpu(table->mask_cache);
+ kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
+ table_instance_destroy(table, ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
flow_table->count = 0;
flow_table->ufid_count = 0;
- table_instance_destroy(old_ti, old_ufid_ti, true);
+ table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
return 0;
err_free_ti:
static u32 flow_hash(const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
- int key_start = range->start;
- int key_end = range->end;
- const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
- int hash_u32s = (key_end - key_start) >> 2;
+ const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
/* Make sure number of hash bytes are multiple of u32. */
- BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+ int hash_u32s = range_n_bytes(range) >> 2;
return jhash2(hash_key, hash_u32s, 0);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
- const struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask,
+ u32 *n_mask_hit)
{
struct sw_flow *flow;
struct hlist_head *head;
ovs_flow_mask_key(&masked_key, unmasked, false, mask);
hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
+ (*n_mask_hit)++;
+
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
return NULL;
}
-struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
- const struct sw_flow_key *key,
- u32 *n_mask_hit)
+/* Flow lookup does a full lookup on the flow table. It starts with the
+ * mask at the index passed in via *index.
+ */
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ struct table_instance *ti,
+ struct mask_array *ma,
+ const struct sw_flow_key *key,
+ u32 *n_mask_hit,
+ u32 *index)
{
- struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct sw_flow *flow;
struct sw_flow_mask *mask;
+ int i;
+
+ if (likely(*index < ma->max)) {
+ mask = rcu_dereference_ovsl(ma->masks[*index]);
+ if (mask) {
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow)
+ return flow;
+ }
+ }
+
+ for (i = 0; i < ma->max; i++) {
+
+ if (i == *index)
+ continue;
+
+ mask = rcu_dereference_ovsl(ma->masks[i]);
+ if (unlikely(!mask))
+ break;
+
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow) { /* Found */
+ *index = i;
+ return flow;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * mask_cache maps a flow to its probable mask. The cache is not tightly
+ * coupled to the mask list, so updates to the mask list can leave stale
+ * entries in the cache.
+ * It is a per-CPU cache, divided into MC_HASH_SEGS segments; on a hash
+ * collision the entry is hashed into the next segment.
+ */
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+ const struct sw_flow_key *key,
+ u32 skb_hash,
+ u32 *n_mask_hit)
+{
+ struct mask_array *ma = rcu_dereference(tbl->mask_array);
+ struct table_instance *ti = rcu_dereference(tbl->ti);
+ struct mask_cache_entry *entries, *ce;
struct sw_flow *flow;
+ u32 hash;
+ int seg;
*n_mask_hit = 0;
- list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
- (*n_mask_hit)++;
- flow = masked_flow_lookup(ti, key, mask);
- if (flow) /* Found */
+ if (unlikely(!skb_hash)) {
+ u32 mask_index = 0;
+
+ return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+ }
+
+	/* Pre- and post-recirculation flows usually have the same skb_hash
+	 * value. To avoid hash collisions, rehash the 'skb_hash' with
+	 * 'recirc_id'. */
+ if (key->recirc_id)
+ skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+ ce = NULL;
+ hash = skb_hash;
+ entries = this_cpu_ptr(tbl->mask_cache);
+
+ /* Find the cache entry 'ce' to operate on. */
+ for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+ int index = hash & (MC_HASH_ENTRIES - 1);
+ struct mask_cache_entry *e;
+
+ e = &entries[index];
+ if (e->skb_hash == skb_hash) {
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+ &e->mask_index);
+ if (!flow)
+ e->skb_hash = 0;
return flow;
+ }
+
+ if (!ce || e->skb_hash < ce->skb_hash)
+ ce = e; /* A better replacement cache candidate. */
+
+ hash >>= MC_HASH_SHIFT;
}
- return NULL;
+
+ /* Cache miss, do full lookup. */
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+ if (flow)
+ ce->skb_hash = skb_hash;
+
+ return flow;
}
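Concretely, with MC_HASH_SHIFT = 8, a 32-bit skb hash yields MC_HASH_SEGS = 4 independent probe positions: bits 0-7 select the first candidate slot, bits 8-15 the second, and so on::

    u32 hash = 0xa1b2c3d4;              /* example skb hash */

    /* seg 0 probes entries[0xd4], seg 1 entries[0xc3],
     * seg 2 entries[0xb2], seg 3 entries[0xa1].
     */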
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
const struct sw_flow_key *key)
{
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
+ u32 index = 0;
- return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+ return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
const struct sw_flow_match *match)
{
- struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
- struct sw_flow_mask *mask;
- struct sw_flow *flow;
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i;
/* Always called under ovs-mutex. */
- list_for_each_entry(mask, &tbl->mask_list, list) {
- flow = masked_flow_lookup(ti, match->key, mask);
+ for (i = 0; i < ma->max; i++) {
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ u32 __always_unused n_mask_hit;
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ mask = ovsl_dereference(ma->masks[i]);
+ if (!mask)
+ continue;
+
+ flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
if (flow && ovs_identifier_is_key(&flow->id) &&
- ovs_flow_cmp_unmasked_key(flow, match))
+ ovs_flow_cmp_unmasked_key(flow, match)) {
return flow;
+ }
}
+
return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
- struct sw_flow_mask *mask;
- int num = 0;
-
- list_for_each_entry(mask, &table->mask_list, list)
- num++;
-
- return num;
+ struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
+ return READ_ONCE(ma->count);
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
-/* Remove 'mask' from the mask list, if it is not needed any more. */
-static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
-{
- if (mask) {
- /* ovs-lock is required to protect mask-refcount and
- * mask list.
- */
- ASSERT_OVSL();
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
-
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- kfree_rcu(mask, rcu);
- }
- }
-}
-
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
- table->count--;
- if (ovs_identifier_is_ufid(&flow->id)) {
- hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
- table->ufid_count--;
- }
-
- /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
- * accessible as long as the RCU read lock is held.
- */
- flow_mask_remove(table, flow->mask);
+ table_instance_flow_free(table, ti, ufid_ti, flow, true);
}
static struct sw_flow_mask *mask_alloc(void)
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
const struct sw_flow_mask *mask)
{
- struct list_head *ml;
+ struct mask_array *ma;
+ int i;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ for (i = 0; i < ma->max; i++) {
+ struct sw_flow_mask *t;
+ t = ovsl_dereference(ma->masks[i]);
- list_for_each(ml, &tbl->mask_list) {
- struct sw_flow_mask *m;
- m = container_of(ml, struct sw_flow_mask, list);
- if (mask_equal(mask, m))
- return m;
+ if (t && mask_equal(mask, t))
+ return t;
}
return NULL;
const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
+
mask = flow_mask_find(tbl, new);
if (!mask) {
		/* Allocate a new mask if none exists. */
return -ENOMEM;
mask->key = new->key;
mask->range = new->range;
- list_add_rcu(&mask->list, &tbl->mask_list);
+
+ /* Add mask to mask-list. */
+ if (tbl_mask_array_add_mask(tbl, mask)) {
+ kfree(mask);
+ return -ENOMEM;
+ }
} else {
BUG_ON(!mask->ref_count);
mask->ref_count++;
#include "flow.h"
+struct mask_cache_entry {
+ u32 skb_hash;
+ u32 mask_index;
+};
+
+struct mask_array {
+ struct rcu_head rcu;
+ int count, max;
+ struct sw_flow_mask __rcu *masks[];
+};
+
struct table_instance {
struct hlist_head *buckets;
unsigned int n_buckets;
struct flow_table {
struct table_instance __rcu *ti;
struct table_instance __rcu *ufid_ti;
- struct list_head mask_list;
+ struct mask_cache_entry __percpu *mask_cache;
+ struct mask_array __rcu *mask_array;
unsigned long last_rehash;
unsigned int count;
unsigned int ufid_count;
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
u32 *bucket, u32 *idx);
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
- const struct sw_flow_key *,
- u32 *n_mask_hit);
+ const struct sw_flow_key *,
+ u32 skb_hash,
+ u32 *n_mask_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
IFF_NO_QUEUE;
netdev->needs_free_netdev = true;
- netdev->priv_destructor = internal_dev_destructor;
+ netdev->priv_destructor = NULL;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
struct internal_dev *internal_dev;
struct net_device *dev;
int err;
- bool free_vport = true;
vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
rtnl_lock();
err = register_netdevice(vport->dev);
- if (err) {
- free_vport = false;
+ if (err)
goto error_unlock;
- }
+ vport->dev->priv_destructor = internal_dev_destructor;
dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
error_free_netdev:
free_netdev(dev);
error_free_vport:
- if (free_vport)
- ovs_vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
if (sk->sk_state == TCP_CLOSE)
return EPOLLERR;
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
- if (!skb_queue_empty(&pn->ctrlreq_queue))
+ if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
mask |= EPOLLPRI;
if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
return EPOLLHUP;
ax25_address rose_callsign;
/*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
-/*
* Convert a ROSE address into text.
*/
char *rose2asc(char *buf, const rose_address *addr)
free_netdev(dev);
goto fail;
}
- rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
int debug_id; /* debug ID for printks */
unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
unsigned short rx_pkt_len; /* Current recvmsg packet len */
+ bool rx_pkt_last; /* Current recvmsg packet is last */
/* Rx/Tx circular buffer, depending on phase.
*
*/
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
u8 *_annotation,
- unsigned int *_offset, unsigned int *_len)
+ unsigned int *_offset, unsigned int *_len,
+ bool *_last)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len;
+ bool last = false;
int ret;
u8 annotation = *_annotation;
u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
len = skb->len - offset;
if (subpacket < sp->nr_subpackets - 1)
len = RXRPC_JUMBO_DATALEN;
+ else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+ last = true;
if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
*_offset = offset;
*_len = len;
+ *_last = last;
call->security->locate_data(call, skb, _offset, _len);
return 0;
}
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top, seq;
size_t remain;
- bool last;
+ bool rx_pkt_last;
unsigned int rx_pkt_offset, rx_pkt_len;
int ix, copy, ret = -EAGAIN, ret2;
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;
+ rx_pkt_last = call->rx_pkt_last;
if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
seq = call->rx_hard_ack;
/* Barriers against rxrpc_input_data(). */
hard_ack = call->rx_hard_ack;
seq = hard_ack + 1;
+
while (top = smp_load_acquire(&call->rx_top),
before_eq(seq, top)
) {
if (rx_pkt_offset == 0) {
ret2 = rxrpc_locate_data(call, skb,
&call->rxtx_annotations[ix],
- &rx_pkt_offset, &rx_pkt_len);
+ &rx_pkt_offset, &rx_pkt_len,
+ &rx_pkt_last);
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
rx_pkt_offset, rx_pkt_len, ret2);
if (ret2 < 0) {
}
/* The whole packet has been transferred. */
- last = sp->hdr.flags & RXRPC_LAST_PACKET;
if (!(flags & MSG_PEEK))
rxrpc_rotate_rx_window(call);
rx_pkt_offset = 0;
rx_pkt_len = 0;
- if (last) {
+ if (rx_pkt_last) {
ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
ret = 1;
goto out;
if (!(flags & MSG_PEEK)) {
call->rx_pkt_offset = rx_pkt_offset;
call->rx_pkt_len = rx_pkt_len;
+ call->rx_pkt_last = rx_pkt_last;
}
done:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
cls_bpf.name = obj->bpf_name;
cls_bpf.exts_integrated = obj->exts_integrated;
- if (oldprog)
+ if (oldprog && prog)
err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
skip_sw, &oldprog->gen_flags,
&oldprog->in_hw_count,
&prog->gen_flags, &prog->in_hw_count,
true);
- else
+ else if (prog)
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
skip_sw, &prog->gen_flags,
&prog->in_hw_count, true);
+ else
+ err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+ skip_sw, &oldprog->gen_flags,
+ &oldprog->in_hw_count, true);
if (prog && err) {
cls_bpf_offload_cmd(tp, oldprog, prog, extack);
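The dispatch now covers all three prog/oldprog combinations explicitly::

    /* oldprog && prog  -> tc_setup_cb_replace()   (swap offloads)
     * prog only        -> tc_setup_cb_add()       (first offload)
     * oldprog only     -> tc_setup_cb_destroy()   (tear down)
     *
     * Previously a pure removal (prog == NULL) took the replace
     * branch and passed pointers derived from the NULL prog.
     */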
};
EXPORT_SYMBOL(pfifo_fast_ops);
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack)
}
spin_lock_init(&sch->busylock);
- lockdep_set_class(&sch->busylock,
- dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
/* seqlock has the same scope of busylock, for NOLOCK qdisc */
spin_lock_init(&sch->seqlock);
- lockdep_set_class(&sch->busylock,
- dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
seqcount_init(&sch->running);
- lockdep_set_class(&sch->running,
- dev->qdisc_running_key ?: &qdisc_running_key);
sch->ops = ops;
sch->flags = ops->static_flags;
dev_hold(dev);
refcount_set(&sch->refcnt, 1);
+ if (sch != &noop_qdisc) {
+ lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+ lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+ lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+ }
+
return sch;
errout1:
kfree(p);
* Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
*/
-#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
struct hhf_sched_data {
struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
- u32 perturbation; /* hash perturbation */
+ siphash_key_t perturbation; /* hash perturbation */
u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
u32 drop_overlimit; /* number of times max qdisc packet
* limit was hit
}
/* Get hashed flow-id of the skb. */
- hash = skb_get_hash_perturb(skb, q->perturbation);
+ hash = skb_get_hash_perturb(skb, &q->perturbation);
/* Check if this packet belongs to an already established HH flow. */
flow_pos = hash & HHF_BIT_MASK;
sch->limit = 1000;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
INIT_LIST_HEAD(&q->new_buckets);
INIT_LIST_HEAD(&q->old_buckets);
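The same substitution recurs in the sfb and sfq hunks below: a 32-bit jhash perturbation seeded with prandom_u32() becomes a 128-bit siphash_key_t seeded with get_random_bytes() and passed by pointer::

    siphash_key_t perturbation;

    get_random_bytes(&perturbation, sizeof(perturbation));
    hash = skb_get_hash_perturb(skb, &perturbation);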
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
* (Section 4.4 of SFB reference : moving hash functions)
*/
struct sfb_bins {
- u32 perturbation; /* jhash perturbation */
+ siphash_key_t perturbation; /* siphash key */
struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
- q->bins[slot].perturbation = prandom_u32();
+ get_random_bytes(&q->bins[slot].perturbation,
+ sizeof(q->bins[slot].perturbation));
}
static void sfb_swap_slot(struct sfb_sched_data *q)
/* If using external classifiers, get result and record it. */
if (!sfb_classify(skb, fl, &ret, &salt))
goto other_drop;
- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
} else {
- sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+ sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
}
/* Inelastic flow */
if (q->double_buffering) {
sfbhash = skb_get_hash_perturb(skb,
- q->bins[slot].perturbation);
+ &q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
u8 headdrop;
u8 maxdepth; /* limit of packets per flow */
- u32 perturbation;
+ siphash_key_t perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
static unsigned int sfq_hash(const struct sfq_sched_data *q,
const struct sk_buff *skb)
{
- return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+ return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ siphash_key_t nkey;
+ get_random_bytes(&nkey, sizeof(nkey));
spin_lock(root_lock);
- q->perturbation = prandom_u32();
+ q->perturbation = nkey;
if (!q->filter_list && q->tail)
sfq_rehash(sch);
spin_unlock(root_lock);
del_timer(&q->perturb_timer);
if (q->perturb_period) {
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
}
sch_tree_unlock(sch);
kfree(p);
q->quantum = psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
if (opt) {
int err = sfq_change(sch, opt);
* offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
* This is left as TODO.
*/
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
{
struct sched_gate_list *oper, *admin;
mask = 0;
/* Is there any exceptional events? */
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= EPOLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* The association is either gone or not ready. */
if (sk_can_busy_loop(sk)) {
sk_busy_loop(sk, noblock);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
continue;
}
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
newinet->inet_dport = htons(asoc->peer.port);
newinet->pmtudisc = inet->pmtudisc;
- newinet->inet_id = asoc->next_tsn ^ jiffies;
+ newinet->inet_id = prandom_u32();
newinet->uc_ttl = inet->uc_ttl;
newinet->mc_loop = 1;
};
EXPORT_SYMBOL_GPL(smc_proto6);
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+ smc->clcsock->file->private_data = smc->sk.sk_socket;
+ smc->clcsock->file = NULL;
+}
+
static int __smc_release(struct smc_sock *smc)
{
struct sock *sk = &smc->sk;
}
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk);
+ smc_restore_fallback_changes(smc);
}
sk->sk_prot->unhash(sk);
int smc_type;
int rc = 0;
- sock_hold(&smc->sk); /* sock put in passive closing */
-
if (smc->use_fallback)
return smc_connect_fallback(smc, smc->fallback_rsn);
rc = kernel_connect(smc->clcsock, addr, alen, flags);
if (rc && rc != -EINPROGRESS)
goto out;
+
+ sock_hold(&smc->sk); /* sock put in passive closing */
if (flags & O_NONBLOCK) {
if (schedule_work(&smc->connect_work))
smc->connect_nonblock = 1;
/* check if RDMA is available */
if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
/* prepare RDMA check */
- memset(&ini, 0, sizeof(ini));
ini.is_smcd = false;
+ ini.ism_dev = NULL;
ini.ib_lcl = &pclc->lcl;
rc = smc_find_rdma_device(new_smc, &ini);
if (rc) {
}
rtnl_lock();
- nest_lvl = dev_get_nest_level(ndev);
+ nest_lvl = ndev->lower_level;
for (i = 0; i < nest_lvl; i++) {
struct list_head *lower = &ndev->adj_list.lower;
int i, nest_lvl;
rtnl_lock();
- nest_lvl = dev_get_nest_level(ndev);
+ nest_lvl = ndev->lower_level;
for (i = 0; i < nest_lvl; i++) {
struct list_head *lower = &ndev->adj_list.lower;
goto out;
spin_lock_bh(&xprt->bc_pa_lock);
- xprt->bc_alloc_max -= max_reqs;
+ xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
dprintk("RPC: req=%p\n", req);
list_del(&req->rq_bc_pa_list);
*/
dprintk("RPC: Last session removed req=%p\n", req);
xprt_free_allocation(req);
- return;
}
+ xprt_put(xprt);
}
/*
spin_unlock(&xprt->bc_pa_lock);
if (new) {
if (req != new)
- xprt_free_bc_rqst(new);
+ xprt_free_allocation(new);
break;
} else if (req)
break;
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
dprintk("RPC: add callback request to list\n");
+ xprt_get(xprt);
spin_lock(&bc_serv->sv_cb_lock);
list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
wake_up(&bc_serv->sv_cb_waitq);
rpc_destroy_wait_queue(&xprt->backlog);
kfree(xprt->servername);
/*
+ * Destroy any existing back channel
+ */
+ xprt_destroy_backchannel(xprt, UINT_MAX);
+
+ /*
* Tear down transport state and free the rpc_xprt
*/
xprt->ops->destroy(xprt);
spin_lock(&xprt->bc_pa_lock);
list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
spin_unlock(&xprt->bc_pa_lock);
+ xprt_put(xprt);
}
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
+ xprt_get(xprt);
spin_lock(&bc_serv->sv_cb_lock);
list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
spin_unlock(&bc_serv->sv_cb_lock);
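The xprt_get()/xprt_put() pairs added above follow one rule: a callback request queued for the service pins the transport, and the reference is dropped when the request is returned to the free list. A simplified userspace model of the pairing (plain integer refcount; not the real rpc_xprt API)::

  #include <stdio.h>
  #include <stdlib.h>

  struct xprt { int refcount; };

  static void xprt_get(struct xprt *x) { x->refcount++; }

  static void xprt_put(struct xprt *x)
  {
          if (--x->refcount == 0) {
                  printf("transport destroyed\n");
                  free(x);
          }
  }

  static void queue_cb_request(struct xprt *x) { xprt_get(x); }
  static void free_cb_request(struct xprt *x)  { xprt_put(x); }

  int main(void)
  {
          struct xprt *x = calloc(1, sizeof(*x));

          x->refcount = 1;        /* creation reference */
          queue_cb_request(x);    /* request queued: +1 */
          free_cb_request(x);     /* request freed:  -1 */
          xprt_put(x);            /* final put from xprt_destroy() */
          return 0;
  }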
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb_peek(list));
- unsigned int maxwin = l->window;
- int imp = msg_importance(hdr);
- unsigned int mtu = l->mtu;
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff *skb, *_skb;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
- u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
- struct sk_buff_head *transmq = &l->transmq;
- struct sk_buff_head *backlogq = &l->backlogq;
- struct sk_buff *skb, *_skb, **tskb;
int pkt_cnt = skb_queue_len(list);
+ int imp = msg_importance(hdr);
+ unsigned int maxwin = l->window;
+ unsigned int mtu = l->mtu;
+ bool new_bundle;
int rc = 0;
if (unlikely(msg_size(hdr) > mtu)) {
}
/* Prepare each packet for sending, and add to relevant queue: */
- while (skb_queue_len(list)) {
- skb = skb_peek(list);
- hdr = buf_msg(skb);
- msg_set_seqno(hdr, seqno);
- msg_set_ack(hdr, ack);
- msg_set_bcast_ack(hdr, bc_ack);
-
+ while ((skb = __skb_dequeue(list))) {
if (likely(skb_queue_len(transmq) < maxwin)) {
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
+ kfree_skb(skb);
__skb_queue_purge(list);
return -ENOBUFS;
}
- __skb_dequeue(list);
__skb_queue_tail(transmq, skb);
/* next retransmit attempt */
if (link_is_bc_sndlink(l))
seqno++;
continue;
}
- tskb = &l->backlog[imp].target_bskb;
- if (tipc_msg_bundle(*tskb, hdr, mtu)) {
- kfree_skb(__skb_dequeue(list));
- l->stats.sent_bundled++;
- continue;
- }
- if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
- kfree_skb(__skb_dequeue(list));
- __skb_queue_tail(backlogq, *tskb);
- l->backlog[imp].len++;
- l->stats.sent_bundled++;
- l->stats.sent_bundles++;
+ if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+ mtu - INT_H_SIZE, l->addr,
+ &new_bundle)) {
+ if (skb) {
+ /* Keep a ref. to the skb for next try */
+ l->backlog[imp].target_bskb = skb;
+ l->backlog[imp].len++;
+ __skb_queue_tail(backlogq, skb);
+ } else {
+ if (new_bundle) {
+ l->stats.sent_bundles++;
+ l->stats.sent_bundled++;
+ }
+ l->stats.sent_bundled++;
+ }
continue;
}
l->backlog[imp].target_bskb = NULL;
- l->backlog[imp].len += skb_queue_len(list);
+ l->backlog[imp].len += (1 + skb_queue_len(list));
+ __skb_queue_tail(backlogq, skb);
skb_queue_splice_tail_init(list, backlogq);
}
l->snd_nxt = seqno;
}
/**
- * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @skb: the buffer to append to ("bundle")
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer
- * Consumes buffer if successful
- * Returns true if bundling could be performed, otherwise false
+ * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
+ * @bskb: the bundle buffer to append to
+ * @msg: message to be appended
+ * @max: max allowable size for the bundle buffer
+ *
+ * Returns "true" if bundling has been performed, otherwise "false"
*/
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
+static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
+ u32 max)
{
- struct tipc_msg *bmsg;
- unsigned int bsz;
- unsigned int msz = msg_size(msg);
- u32 start, pad;
- u32 max = mtu - INT_H_SIZE;
+ struct tipc_msg *bmsg = buf_msg(bskb);
+ u32 msz, bsz, offset, pad;
- if (likely(msg_user(msg) == MSG_FRAGMENTER))
- return false;
- if (!skb)
- return false;
- bmsg = buf_msg(skb);
+ msz = msg_size(msg);
bsz = msg_size(bmsg);
- start = align(bsz);
- pad = start - bsz;
+ offset = align(bsz);
+ pad = offset - bsz;
- if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
+ if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
- if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+ if (unlikely(max < (offset + msz)))
return false;
- if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
+
+ skb_put(bskb, pad + msz);
+ skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
+ msg_set_size(bmsg, offset + msz);
+ msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ return true;
+}
+
+/**
+ * tipc_msg_try_bundle - Try to bundle a new message to the last one
+ * @tskb: the last/target message to which the new one will be appended
+ * @skb: the new message skb pointer
+ * @mss: max message size (header inclusive)
+ * @dnode: destination node for the message
+ * @new_bundle: if this call made a new bundle or not
+ *
+ * Return: "true" if the new message skb is a candidate for bundling, either
+ * now or later. If bundling was performed this time, the skb is consumed and
+ * the skb pointer is set to NULL.
+ * Otherwise "false", if the skb cannot be bundled at all.
+ */
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle)
+{
+ struct tipc_msg *msg, *inner, *outer;
+ u32 tsz;
+
+ /* First, check if the new buffer is suitable for bundling */
+ msg = buf_msg(*skb);
+ if (msg_user(msg) == MSG_FRAGMENTER)
return false;
- if (unlikely(skb_tailroom(skb) < (pad + msz)))
+ if (msg_user(msg) == TUNNEL_PROTOCOL)
return false;
- if (unlikely(max < (start + msz)))
+ if (msg_user(msg) == BCAST_PROTOCOL)
return false;
- if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
- (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+ if (mss <= INT_H_SIZE + msg_size(msg))
return false;
- skb_put(skb, pad + msz);
- skb_copy_to_linear_data_offset(skb, start, msg, msz);
- msg_set_size(bmsg, start + msz);
- msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+	/* OK; if there is no target buffer yet, keep the skb for a later try */
+ if (unlikely(!tskb))
+ return true;
+
+ /* Is it a bundle already? Try to bundle the new message to it */
+ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
+ *new_bundle = false;
+ goto bundle;
+ }
+
+ /* Make a new bundle of the two messages if possible */
+ tsz = msg_size(buf_msg(tskb));
+ if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
+ return true;
+ if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
+ GFP_ATOMIC)))
+ return true;
+ inner = buf_msg(tskb);
+ skb_push(tskb, INT_H_SIZE);
+ outer = buf_msg(tskb);
+ tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
+ dnode);
+ msg_set_importance(outer, msg_importance(inner));
+ msg_set_size(outer, INT_H_SIZE + tsz);
+ msg_set_msgcnt(outer, 1);
+ *new_bundle = true;
+
+bundle:
+ if (likely(tipc_msg_bundle(tskb, msg, mss))) {
+ consume_skb(*skb);
+ *skb = NULL;
+ }
return true;
}
}
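Callers of tipc_msg_try_bundle() must handle the three outcomes described in the kernel-doc above. A self-contained model of that contract (simplified types and size check; not the real TIPC structures)::

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct pkt { int len; };

  /* false: *pkt can never be bundled; true with *pkt == NULL: bundled
   * now; true with *pkt still set: keep it as the next bundling target */
  static bool try_bundle(struct pkt *target, struct pkt **pkt, int mss)
  {
          if ((*pkt)->len >= mss)
                  return false;
          if (target && target->len + (*pkt)->len <= mss) {
                  target->len += (*pkt)->len;
                  free(*pkt);
                  *pkt = NULL;            /* consumed */
          }
          return true;
  }

  int main(void)
  {
          struct pkt tgt = { .len = 400 };
          struct pkt *p = malloc(sizeof(*p));

          p->len = 300;
          if (!try_bundle(&tgt, &p, 1400))
                  printf("send unbundled\n");
          else if (p)
                  printf("kept as new target\n");
          else
                  printf("bundled, target len=%d\n", tgt.len);
          return 0;
  }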
/**
- * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain, where head is the buffer to replace/append
- * @skb: buffer to be created, appended to and returned in case of success
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer, inclusive header
- * @dnode: destination node for message. (Not always present in header)
- * Returns true if success, otherwise false
- */
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode)
-{
- struct sk_buff *_skb;
- struct tipc_msg *bmsg;
- u32 msz = msg_size(msg);
- u32 max = mtu - INT_H_SIZE;
-
- if (msg_user(msg) == MSG_FRAGMENTER)
- return false;
- if (msg_user(msg) == TUNNEL_PROTOCOL)
- return false;
- if (msg_user(msg) == BCAST_PROTOCOL)
- return false;
- if (msz > (max / 2))
- return false;
-
- _skb = tipc_buf_acquire(max, GFP_ATOMIC);
- if (!_skb)
- return false;
-
- skb_trim(_skb, INT_H_SIZE);
- bmsg = buf_msg(_skb);
- tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
- INT_H_SIZE, dnode);
- msg_set_importance(bmsg, msg_importance(msg));
- msg_set_seqno(bmsg, msg_seqno(msg));
- msg_set_ack(bmsg, msg_ack(msg));
- msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- tipc_msg_bundle(_skb, msg, mtu);
- *skb = _skb;
- return true;
-}
-
-/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
* @skb: buffer containing message to be reversed; will be consumed
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode);
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle);
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
int pktmax, struct sk_buff_head *frags);
/* fall through */
case TIPC_LISTEN:
case TIPC_CONNECTING:
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
revents |= EPOLLIN | EPOLLRDNORM;
break;
case TIPC_OPEN:
revents |= EPOLLOUT;
if (!tipc_sk_type_connectionless(sk))
break;
- if (skb_queue_empty(&sk->sk_receive_queue))
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
break;
revents |= EPOLLIN | EPOLLRDNORM;
break;
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
mask = 0;
/* exceptional events? */
- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
* the queue and write as long as the socket isn't shutdown for
* sending.
*/
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN)) {
mask |= EPOLLIN | EPOLLRDNORM;
}
return false;
}
+ /* channel 14 is only for IEEE 802.11b */
+ if (chandef->center_freq1 == 2484 &&
+ chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+ return false;
+
if (cfg80211_chandef_is_edmg(chandef) &&
!cfg80211_edmg_chandef_valid(chandef))
return false;
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_MESH_ID_LEN },
- [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+ [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
}
if (freq == 2484) {
- if (chandef->width > NL80211_CHAN_WIDTH_40)
+ /* channel 14 is only for IEEE 802.11b */
+ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
return false;
*op_class = 82; /* channel 14 */
{
unsigned long flags;
+ if (!xs->tx)
+ return;
+
spin_lock_irqsave(&umem->xsk_list_lock, flags);
list_add_rcu(&xs->list, &umem->xsk_list);
spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
{
unsigned long flags;
+ if (!xs->tx)
+ return;
+
spin_lock_irqsave(&umem->xsk_list_lock, flags);
list_del_rcu(&xs->list);
spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
return false;
}
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
u32 len;
__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}
-void xsk_flush(struct xdp_sock *xs)
+static void xsk_flush(struct xdp_sock *xs)
{
xskq_produce_flush_desc(xs->rx);
xs->sk.sk_data_ready(&xs->sk);
return err;
}
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+ if (err)
+ return err;
+
+ if (!xs->flush_node.prev)
+ list_add(&xs->flush_node, flush_list);
+
+ return 0;
+}
+
+void __xsk_map_flush(struct bpf_map *map)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+ xsk_flush(xs);
+ __list_del_clearprev(&xs->flush_node);
+ }
+}
+
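The redirect/flush pair above relies on the list node's ->prev pointer doubling as an "already queued" flag: __xsk_map_redirect() only enqueues a socket whose flush_node.prev is NULL, and __list_del_clearprev() resets it during the flush. A userspace model of the idiom (no RCU or per-CPU lists here)::

  #include <stdio.h>

  struct node { struct node *prev, *next; };

  static struct node flush_list = { &flush_list, &flush_list };

  static void queue_once(struct node *n)
  {
          if (n->prev)                    /* already on the list */
                  return;
          n->next = flush_list.next;
          n->prev = &flush_list;
          flush_list.next->prev = n;
          flush_list.next = n;
  }

  static void flush_all(void)
  {
          struct node *n = flush_list.next;

          while (n != &flush_list) {
                  struct node *next = n->next;

                  printf("flushed %p\n", (void *)n);
                  n->prev = n->next = NULL;   /* __list_del_clearprev() */
                  n = next;
          }
          flush_list.next = flush_list.prev = &flush_list;
  }

  int main(void)
  {
          struct node a = { 0 }, b = { 0 };

          queue_once(&a);
          queue_once(&a);                 /* no-op: already queued */
          queue_once(&b);
          flush_all();
          return 0;
  }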
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
xskq_produce_flush_addr_n(umem->cq, nb_entries);
if (addrlen != sizeof(*in6))
return 0;
- ret = bpf_probe_read(test_params.dst6, sizeof(test_params.dst6),
- &in6->sin6_addr);
+ ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
+ &in6->sin6_addr);
if (ret)
goto done;
if (addrlen != sizeof(*in6))
return 0;
- ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr);
+ ret = bpf_probe_read_user(dst6, sizeof(dst6), &in6->sin6_addr);
if (ret) {
inline_ret = ret;
goto done;
test_case = dst6[7];
- ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port);
+ ret = bpf_probe_read_user(&port, sizeof(port), &in6->sin6_port);
if (ret) {
inline_ret = ret;
goto done;
if (sockaddr_len > sizeof(orig_addr))
return 0;
- if (bpf_probe_read(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
+ if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
return 0;
mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr);
return export_unknown;
}
-static char *sym_extract_namespace(const char **symname)
+static const char *namespace_from_kstrtabns(struct elf_info *info,
+ Elf_Sym *kstrtabns)
{
- char *namespace = NULL;
- char *ns_separator;
+ char *value = info->ksymtab_strings + kstrtabns->st_value;
+ return value[0] ? value : NULL;
+}
+
+static void sym_update_namespace(const char *symname, const char *namespace)
+{
+ struct symbol *s = find_symbol(symname);
- ns_separator = strchr(*symname, '.');
- if (ns_separator) {
- namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
- *symname = ns_separator + 1;
+ /*
+ * That symbol should have been created earlier and thus this is
+ * actually an assertion.
+ */
+ if (!s) {
+ merror("Could not update namespace(%s) for symbol %s\n",
+ namespace, symname);
+ return;
}
- return namespace;
+ free(s->namespace);
+ s->namespace =
+ namespace && namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
}
/**
* Add an exported symbol - it may have already been added without a
* CRC, in this case just update the CRC
**/
-static struct symbol *sym_add_exported(const char *name, const char *namespace,
- struct module *mod, enum export export)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+ enum export export)
{
struct symbol *s = find_symbol(name);
s->module = mod;
}
}
- free(s->namespace);
- s->namespace = namespace ? strdup(namespace) : NULL;
s->preloaded = 0;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
+ else if (strcmp(secname, "__ksymtab_strings") == 0)
+ info->ksymtab_strings = (void *)hdr +
+ sechdrs[i].sh_offset -
+ sechdrs[i].sh_addr;
if (sechdrs[i].sh_type == SHT_SYMTAB) {
unsigned int sh_link_idx;
enum export export;
bool is_crc = false;
const char *name;
- char *namespace;
if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
strstarts(symname, "__ksymtab"))
/* All exported symbols */
if (strstarts(symname, "__ksymtab_")) {
name = symname + strlen("__ksymtab_");
- namespace = sym_extract_namespace(&name);
- sym_add_exported(name, namespace, mod, export);
- free(namespace);
+ sym_add_exported(name, mod, export);
}
if (strcmp(symname, "init_module") == 0)
mod->has_init = 1;
handle_moddevtable(mod, &info, sym, symname);
}
+ /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
+ for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+ symname = remove_dot(info.strtab + sym->st_name);
+
+ if (strstarts(symname, "__kstrtabns_"))
+ sym_update_namespace(symname + strlen("__kstrtabns_"),
+ namespace_from_kstrtabns(&info,
+ sym));
+ }
+
// check for static EXPORT_SYMBOL_* functions && global vars
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
unsigned char bind = ELF_ST_BIND(sym->st_info);
else
basename = mod->name;
- if (exp->namespace && exp->namespace[0]) {
+ if (exp->namespace) {
add_namespace(&mod->required_namespaces,
exp->namespace);
mod = new_module(modname);
mod->skip = 1;
}
- s = sym_add_exported(symname, namespace, mod,
- export_no(export));
+ s = sym_add_exported(symname, mod, export_no(export));
s->kernel = kernel;
s->preloaded = 1;
s->is_static = 0;
sym_update_crc(symname, mod, crc, export_no(export));
+ sym_update_namespace(symname, namespace);
}
release_file(file, size);
return;
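For context, the module side this parses: EXPORT_SYMBOL_NS() records the namespace string in __ksymtab_strings and emits a __kstrtabns_<symbol> entry pointing at it, which modpost now resolves instead of splitting a dot-suffixed symbol name. A usage sketch (hypothetical module; assumes the 5.4-era unquoted-namespace form of the macro)::

  #include <linux/module.h>

  int example_fn(void)
  {
          return 0;
  }
  /* the namespace string lands in __ksymtab_strings; a matching
   * __kstrtabns_example_fn symbol points at it for modpost */
  EXPORT_SYMBOL_NS(example_fn, EXAMPLE_NS);

  MODULE_LICENSE("GPL");

A consumer pairs this with MODULE_IMPORT_NS(EXAMPLE_NS), which the nsdeps script above can add automatically.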
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
+ char *ksymtab_strings;
char *strtab;
char *modinfo;
unsigned int modinfo_len;
if [ ! -f "$ns_deps_file" ]; then return; fi
local mod_source_files=`cat $mod_file | sed -n 1p \
| sed -e 's/\.o/\.c/g' \
- | sed "s/[^ ]* */${srctree}\/&/g"`
+ | sed "s|[^ ]* *|${srctree}/&|g"`
for ns in `cat $ns_deps_file`; do
echo "Adding namespace $ns to module $mod_name (if needed)."
generate_deps_for_ns $ns $mod_source_files
# Check for mercurial and a mercurial repo.
if test -d .hg && hgid=`hg id 2>/dev/null`; then
	# Do we have a tagged version? If so, latesttagdistance == 1
- if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+ if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
id=`hg log -r . --template '{latesttag}'`
printf '%s%s' -hg "$id"
else
[LOCKDOWN_NONE] = "none",
[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+ [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
[LOCKDOWN_KEXEC] = "kexec of unsigned images",
[LOCKDOWN_HIBERNATION] = "hibernation",
[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
return 0;
}
-static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put);
/*
* open a timer instance
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
+ struct device *card_dev_to_put = NULL;
int err;
mutex_lock(®ister_mutex);
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
err = snd_timer_check_slave(timeri);
if (err < 0) {
- snd_timer_close_locked(timeri);
+ snd_timer_close_locked(timeri, &card_dev_to_put);
timeri = NULL;
}
goto unlock;
timeri = NULL;
if (timer->card)
- put_device(&timer->card->card_dev);
+ card_dev_to_put = &timer->card->card_dev;
module_put(timer->module);
goto unlock;
}
timer->num_instances++;
err = snd_timer_check_master(timeri);
if (err < 0) {
- snd_timer_close_locked(timeri);
+ snd_timer_close_locked(timeri, &card_dev_to_put);
timeri = NULL;
}
unlock:
mutex_unlock(®ister_mutex);
+	/* call put_device() after unlock to avoid a deadlock */
+ if (card_dev_to_put)
+ put_device(card_dev_to_put);
*ti = timeri;
return err;
}
* close a timer instance
* call this with register_mutex down.
*/
-static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put)
{
struct snd_timer *timer = timeri->timer;
struct snd_timer_instance *slave, *tmp;
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
- put_device(&timer->card->card_dev);
+ *card_devp_to_put = &timer->card->card_dev;
module_put(timer->module);
}
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
+ struct device *card_dev_to_put = NULL;
int err;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(®ister_mutex);
- err = snd_timer_close_locked(timeri);
+ err = snd_timer_close_locked(timeri, &card_dev_to_put);
mutex_unlock(®ister_mutex);
+	/* call put_device() after unlock to avoid a deadlock */
+ if (card_dev_to_put)
+ put_device(card_dev_to_put);
return err;
}
EXPORT_SYMBOL(snd_timer_close);
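The card_dev_to_put plumbing above implements a common defer-the-release pattern: record the reference to drop while the mutex is held, and drop it only after unlocking, so the release path cannot re-take the same lock. A generic sketch (pthread-based; not the ALSA API)::

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;

  struct dev { int refs; };

  static void put_dev(struct dev *d)
  {
          if (--d->refs == 0) {
                  /* the real release path may re-take register_mutex */
                  printf("released\n");
                  free(d);
          }
  }

  static void close_instance(struct dev *d)
  {
          struct dev *to_put = NULL;

          pthread_mutex_lock(&register_mutex);
          to_put = d;              /* was: put_dev(d) under the lock */
          pthread_mutex_unlock(&register_mutex);

          if (to_put)
                  put_dev(to_put); /* safe: lock no longer held */
  }

  int main(void)
  {
          struct dev *d = calloc(1, sizeof(*d));

          d->refs = 1;
          close_instance(d);
          return 0;
  }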
return err;
}
-static unsigned int
-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
{
unsigned int sec, sections, ch, channels;
unsigned int pcm, midi, location;
list_for_each_entry(azx_dev, &bus->stream_list, list)
snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
- synchronize_irq(bus->irq);
-
/* disable SIE for all streams */
snd_hdac_chip_writeb(bus, INTCTL, 0);
}
if (bus->chip_init) {
- azx_stop_chip(chip);
azx_clear_irq_pending(chip);
azx_stop_all_streams(chip);
+ azx_stop_chip(chip);
}
if (bus->irq >= 0)
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Jasperlake */
+ { PCI_DEVICE(0x8086, 0x38c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Tigerlake */
+ { PCI_DEVICE(0x8086, 0xa0c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
struct snd_array pins; /* struct hdmi_spec_per_pin */
struct hdmi_pcm pcm_rec[16];
struct mutex pcm_lock;
+ struct mutex bind_lock; /* for audio component binding */
/* pcm_bitmap means which pcms have been assigned to pins*/
unsigned long pcm_bitmap;
int pcm_used; /* counter of pcm_rec[] */
struct hdmi_spec *spec = codec->spec;
int pin_idx;
- mutex_lock(&spec->pcm_lock);
+ mutex_lock(&spec->bind_lock);
spec->use_jack_detect = !codec->jackpoll_interval;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
snd_hda_jack_detect_enable_callback(codec, pin_nid,
jack_callback);
}
- mutex_unlock(&spec->pcm_lock);
+ mutex_unlock(&spec->bind_lock);
return 0;
}
spec->ops = generic_standard_hdmi_ops;
spec->dev_num = 1; /* initialize to 1 */
mutex_init(&spec->pcm_lock);
+ mutex_init(&spec->bind_lock);
snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
spec->chmap.ops.get_chmap = hdmi_get_chmap;
int i;
spec = container_of(acomp->audio_ops, struct hdmi_spec, drm_audio_ops);
- mutex_lock(&spec->pcm_lock);
+ mutex_lock(&spec->bind_lock);
spec->use_acomp_notifier = use_acomp;
spec->codec->relaxed_resume = use_acomp;
/* reprogram each jack detection logic depending on the notifier */
get_pin(spec, i)->pin_nid,
use_acomp);
}
- mutex_unlock(&spec->pcm_lock);
+ mutex_unlock(&spec->bind_lock);
}
/* enable / disable the notifier via master bind / unbind */
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
alc_update_coef_idx(codec, 0x10, 1<<15, 0);
break;
case 0x10ec0662:
case 0x10ec0672:
alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
break;
+ case 0x10ec0623:
+ alc_update_coef_idx(codec, 0x19, 1<<13, 0);
+ break;
case 0x10ec0668:
alc_update_coef_idx(codec, 0x7, 3<<13, 0);
break;
ALC269_TYPE_ALC225,
ALC269_TYPE_ALC294,
ALC269_TYPE_ALC300,
+ ALC269_TYPE_ALC623,
ALC269_TYPE_ALC700,
};
case ALC269_TYPE_ALC225:
case ALC269_TYPE_ALC294:
case ALC269_TYPE_ALC300:
+ case ALC269_TYPE_ALC623:
case ALC269_TYPE_ALC700:
ssids = alc269_ssids;
break;
SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
spec->codec_variant = ALC269_TYPE_ALC300;
spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
break;
+ case 0x10ec0623:
+ spec->codec_variant = ALC269_TYPE_ALC623;
+ break;
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269),
HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
max98373->i_slot = value & 0xF;
else
max98373->i_slot = 1;
-
- max98373->reset_gpio = of_get_named_gpio(dev->of_node,
+ if (dev->of_node) {
+ max98373->reset_gpio = of_get_named_gpio(dev->of_node,
"maxim,reset-gpio", 0);
- if (!gpio_is_valid(max98373->reset_gpio)) {
- dev_err(dev, "Looking up %s property in node %s failed %d\n",
- "maxim,reset-gpio", dev->of_node->full_name,
- max98373->reset_gpio);
+ if (!gpio_is_valid(max98373->reset_gpio)) {
+ dev_err(dev, "Looking up %s property in node %s failed %d\n",
+ "maxim,reset-gpio", dev->of_node->full_name,
+ max98373->reset_gpio);
+ } else {
+ dev_dbg(dev, "maxim,reset-gpio=%d",
+ max98373->reset_gpio);
+ }
} else {
- dev_dbg(dev, "maxim,reset-gpio=%d",
- max98373->reset_gpio);
+		/* mark reset_gpio as invalid */
+ max98373->reset_gpio = -1;
}
if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
};
+static const char * const rx_mix2_text[] = {
+ "ZERO", "IIR1", "IIR2"
+};
+
static const char *const dec_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
};
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
};
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B3_CTL,
+ 0, 3, rx_mix2_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B3_CTL,
+ 0, 3, rx_mix2_text);
+
/* DEC */
static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
"RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
"RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
+ "RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
+ "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
&rx3_mix1_inp2_mux),
SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
&rx3_mix1_inp3_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx1_mix2_inp1_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx2_mix2_inp1_mux),
SND_SOC_DAPM_MUX("CIC1 MUX", SND_SOC_NOPM, 0, 0, &cic1_mux),
SND_SOC_DAPM_MUX("CIC2 MUX", SND_SOC_NOPM, 0, 0, &cic2_mux),
static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
{
+ if (!rt5651->hp_jack)
+ return false;
+
/* Button press support only works with internal jack-detection */
return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
rt5651->gpiod_hp_det == NULL;
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ rt5682->hs_jack = hs_jack;
+
+ if (!hs_jack) {
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ return 0;
+ }
+
switch (rt5682->pdata.jd_src) {
case RT5682_JD1:
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
break;
}
- rt5682->hs_jack = hs_jack;
-
return 0;
}
static SOC_ENUM_SINGLE_DECL(adc_osr,
WM8994_OVERSAMPLING, 1, osr_text);
-static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+static const struct snd_kcontrol_new wm8994_common_snd_controls[] = {
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
WM8994_AIF1_ADC1_RIGHT_VOLUME,
1, 119, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
- WM8994_AIF1_ADC2_RIGHT_VOLUME,
- 1, 119, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
WM8994_AIF2_ADC_RIGHT_VOLUME,
1, 119, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
- WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8994_AIF2_DAC_LEFT_VOLUME,
WM8994_AIF2_DAC_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
SOC_SINGLE_TLV("AIF2 Boost Volume", WM8994_AIF2_CONTROL_2, 10, 3, 0, aif_tlv),
SOC_SINGLE("AIF1DAC1 EQ Switch", WM8994_AIF1_DAC1_EQ_GAINS_1, 0, 1, 0),
-SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
SOC_SINGLE("AIF2 EQ Switch", WM8994_AIF2_EQ_GAINS_1, 0, 1, 0),
WM8994_DRC_SWITCH("AIF1DAC1 DRC Switch", WM8994_AIF1_DRC1_1, 2),
WM8994_DRC_SWITCH("AIF1ADC1L DRC Switch", WM8994_AIF1_DRC1_1, 1),
WM8994_DRC_SWITCH("AIF1ADC1R DRC Switch", WM8994_AIF1_DRC1_1, 0),
-WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
-WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
-WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
-
WM8994_DRC_SWITCH("AIF2DAC DRC Switch", WM8994_AIF2_DRC_1, 2),
WM8994_DRC_SWITCH("AIF2ADCL DRC Switch", WM8994_AIF2_DRC_1, 1),
WM8994_DRC_SWITCH("AIF2ADCR DRC Switch", WM8994_AIF2_DRC_1, 0),
SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
-SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
-SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
-
SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
8, 1, 0),
};
+/* Controls not available on WM1811 */
+static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
+ WM8994_AIF1_ADC2_RIGHT_VOLUME,
+ 1, 119, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
+ WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+
+SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
+
+WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
+WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
+WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+};
+
static const struct snd_kcontrol_new wm8994_eq_controls[] = {
SOC_SINGLE_TLV("AIF1DAC1 EQ1 Volume", WM8994_AIF1_DAC1_EQ_GAINS_1, 11, 31, 0,
eq_tlv),
wm8994_handle_pdata(wm8994);
wm_hubs_add_analogue_controls(component);
- snd_soc_add_component_controls(component, wm8994_snd_controls,
- ARRAY_SIZE(wm8994_snd_controls));
+ snd_soc_add_component_controls(component, wm8994_common_snd_controls,
+ ARRAY_SIZE(wm8994_common_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
ARRAY_SIZE(wm8994_dapm_widgets));
switch (control->type) {
case WM8994:
+ snd_soc_add_component_controls(component, wm8994_snd_controls,
+ ARRAY_SIZE(wm8994_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
ARRAY_SIZE(wm8994_specific_dapm_widgets));
if (control->revision < 4) {
}
break;
case WM8958:
+ snd_soc_add_component_controls(component, wm8994_snd_controls,
+ ARRAY_SIZE(wm8994_snd_controls));
snd_soc_add_component_controls(component, wm8958_snd_controls,
- ARRAY_SIZE(wm8958_snd_controls));
+ ARRAY_SIZE(wm8958_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
ARRAY_SIZE(wm8958_dapm_widgets));
if (control->revision < 1) {
}
if (in) {
- if (in & WMFW_CTL_FLAG_READABLE)
- out |= rd;
+ out |= rd;
if (in & WMFW_CTL_FLAG_WRITEABLE)
out |= wr;
if (in & WMFW_CTL_FLAG_VOLATILE)
u32 xmalg, addr, magic;
int i, ret;
+ alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
+ if (!alg_region) {
+ adsp_err(dsp, "No algorithm region found\n");
+ return -EINVAL;
+ }
+
buf = wm_adsp_buffer_alloc(dsp);
if (!buf)
return -ENOMEM;
- alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
xmalg = dsp->ops->sys_config_size / sizeof(__be32);
addr = alg_region->base + xmalg + ALG_XM_FIELD(magic);
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
/* other jacks */
{ "IN1P", NULL, "Headset Mic" },
-
- /* digital mics */
- {"DMic", NULL, "SoC DMIC"},
-
};
static const struct snd_soc_dapm_route speaker_map[] = {
{ "Spk", NULL, "Speaker" },
};
+static const struct snd_soc_dapm_route dmic_map[] = {
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
return ret;
}
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+ ARRAY_SIZE(dmic_widgets));
+ if (ret) {
+ dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+ ARRAY_SIZE(dmic_map));
+
+ if (ret)
+ dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+ return ret;
+}
+
/* sof audio machine driver for rt5682 codec */
static struct snd_soc_card sof_audio_card_rt5682 = {
.name = "sof_rt5682",
links[id].name = "dmic01";
links[id].cpus = &cpus[id];
links[id].cpus->dai_name = "DMIC01 Pin";
+ links[id].init = dmic_init;
if (dmic_be_num > 1) {
/* set up 2 BE links at most */
links[id + 1].name = "dmic16k";
/* need to get main clock from pmc */
if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(ctx->mclk)) {
+ ret = PTR_ERR(ctx->mclk);
+
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+ ret);
+ return ret;
+ }
+
ret = clk_prepare_enable(ctx->mclk);
if (ret < 0) {
dev_err(&pdev->dev,
&sof_audio_card_rt5682);
}
+static int sof_rt5682_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_component *component = NULL;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, rt5682_component[0].name)) {
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct platform_driver sof_audio = {
.probe = sof_audio_probe,
+ .remove = sof_rt5682_remove,
.driver = {
.name = "sof_rt5682",
.pm = &snd_soc_pm_ops,
ret = rockchip_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Could not register PCM\n");
- return ret;
+ goto err_suspend;
}
return 0;
// Author: Claude <claude@insginal.co.kr>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
.num_links = ARRAY_SIZE(arndale_rt5631_dai),
};
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int i;
+
+ for_each_card_prelinks(card, i, dai_link) {
+ of_node_put(dai_link->cpus->of_node);
+ of_node_put(dai_link->codecs->of_node);
+ }
+}
+
static int arndale_audio_probe(struct platform_device *pdev)
{
int n, ret;
if (!arndale_rt5631_dai[0].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'samsung,audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_of_nodes;
}
}
ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+ goto err_put_of_nodes;
+ }
+ return 0;
- if (ret)
- dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
-
+err_put_of_nodes:
+ arndale_put_of_nodes(card);
return ret;
}
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ arndale_put_of_nodes(card);
+ return 0;
+}
+
static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
{ .compatible = "samsung,arndale-rt5631", },
{ .compatible = "samsung,arndale-alc5631", },
.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
},
.probe = arndale_audio_probe,
+ .remove = arndale_audio_remove,
};
module_platform_driver(arndale_audio_driver);
}
/* set format */
+ rdai->bit_clk_inv = 0;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
rdai->sys_delay = 0;
return ret;
}
- snd_soc_dai_trigger(cpu_dai, substream, cmd);
+ ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
if (ret < 0)
return ret;
return ret;
}
- snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
+ ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
if (ret < 0)
return ret;
{
struct snd_soc_dpcm *dpcm;
unsigned long flags;
+ char *name;
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
stream ? "<-" : "->", be->dai_link->name);
#ifdef CONFIG_DEBUG_FS
- dpcm->debugfs_state = debugfs_create_dir(be->dai_link->name,
- fe->debugfs_dpcm_root);
- debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state);
+ name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
+ stream ? "capture" : "playback");
+ if (name) {
+ dpcm->debugfs_state = debugfs_create_dir(name,
+ fe->debugfs_dpcm_root);
+ debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+ &dpcm->state);
+ kfree(name);
+ }
#endif
return 1;
}
/* map user to kernel widget ID */
template.id = get_widget_id(le32_to_cpu(w->id));
- if (template.id < 0)
+ if ((int)template.id < 0)
return template.id;
/* strings are allocated here, but used and freed by the widget */
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value =
- mixer_to_ipc(ucontrol->value.integer.value[i],
+ value = mixer_to_ipc(ucontrol->value.integer.value[i],
scontrol->volume_table, sm->max + 1);
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of mixer updates */
SOF_CTRL_TYPE_VALUE_CHAN_GET,
SOF_CTRL_CMD_VOLUME,
true);
-
- return 0;
+ return change;
}
int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value = ucontrol->value.integer.value[i];
+ value = ucontrol->value.integer.value[i];
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of mixer updates */
SOF_CTRL_CMD_SWITCH,
true);
- return 0;
+ return change;
}
int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+ value = ucontrol->value.enumerated.item[i];
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of enum updates */
SOF_CTRL_CMD_ENUM,
true);
- return 0;
+ return change;
}
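The three put handlers above switch from returning 0 to returning whether any channel value actually changed; ALSA uses a positive return from a kcontrol .put callback to decide whether to notify userspace of the change. A minimal model of the pattern::

  #include <stdbool.h>
  #include <stdio.h>

  static long current_vals[2];

  static int example_put(const long *new_vals, int channels)
  {
          bool change = false;

          for (int i = 0; i < channels; i++) {
                  change = change || (new_vals[i] != current_vals[i]);
                  current_vals[i] = new_vals[i];
          }
          return change;  /* 1: changed, event emitted; 0: no-op */
  }

  int main(void)
  {
          long v[2] = { 3, 4 };

          printf("%d\n", example_put(v, 2));      /* 1 */
          printf("%d\n", example_put(v, 2));      /* 0 */
          return 0;
  }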
int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
Say Y if you want to enable HDAudio codecs with SOF.
If unsure select "N".
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+ bool "SOF enable DMI Link L1"
+ help
+ This option enables DMI L1 for both playback and capture
+ and disables known workarounds for specific HDaudio platforms.
+	  Only use this option to look into power optimizations on platforms
+	  not affected by DMI L1 issues. This option is not recommended.
+	  Say Y if you want to enable DMI Link L1.
+ If unsure, select "N".
+
endif ## SND_SOC_SOF_HDA_COMMON
config SND_SOC_SOF_HDA_LINK_BASELINE
#define MBOX_SIZE 0x1000
#define MBOX_DUMP_SIZE 0x30
#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
/* DSP peripherals */
#define DMAC0_OFFSET 0xFE000
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* TODO: add offsets */
sdev->mmio_bar = BDW_DSP_BAR;
sdev->mailbox_bar = BDW_DSP_BAR;
+ sdev->dsp_oops_offset = MBOX_OFFSET;
/* PCI base */
mmio = platform_get_resource(pdev, IORESOURCE_MEM,
#define MBOX_OFFSET 0x144000
#define MBOX_SIZE 0x1000
#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
/* DSP peripherals */
#define DMAC0_OFFSET 0x098000
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
*/
int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
{
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- struct hdac_bus *bus = sof_to_bus(sdev);
-#endif
u32 val;
/* enable/disable audio dsp clock gating */
val = enable ? PCI_CGCTL_ADSPDCGE : 0;
snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- /* enable/disable L1 support */
- val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
- snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
-#endif
+ /* enable/disable DMI Link L1 support */
+ val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, val);
/* enable/disable audio dsp power gating */
val = enable ? 0 : PCI_PGCTL_ADSPPGD;
return -ENODEV;
}
hstream = &dsp_stream->hstream;
+ hstream->substream = NULL;
/* allocate DMA buffer */
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
direction == SNDRV_PCM_STREAM_PLAYBACK ?
"playback" : "capture");
+ /*
+ * Disable DMI Link L1 entry when capture stream is opened.
+ * Workaround to address a known issue with host DMA that results
+ * in xruns during pause/release in capture scenarios.
+ */
+	if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
return stream;
}
{
struct hdac_bus *bus = sof_to_bus(sdev);
struct hdac_stream *s;
+ bool active_capture_stream = false;
+ bool found = false;
spin_lock_irq(&bus->reg_lock);
- /* find used stream */
+ /*
+ * close stream matching the stream tag
+ * and check if there are any open capture streams.
+ */
list_for_each_entry(s, &bus->stream_list, list) {
- if (s->direction == direction &&
- s->opened && s->stream_tag == stream_tag) {
+ if (!s->opened)
+ continue;
+
+ if (s->direction == direction && s->stream_tag == stream_tag) {
s->opened = false;
- spin_unlock_irq(&bus->reg_lock);
- return 0;
+ found = true;
+ } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+ active_capture_stream = true;
}
}
spin_unlock_irq(&bus->reg_lock);
- dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
- return -ENODEV;
+ /* Enable DMI L1 entry if there are no capture streams open */
+	if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (!active_capture_stream)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+ if (!found) {
+ dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+ return -ENODEV;
+ }
+
+ return 0;
}
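The close path above re-enables DMI L1 only once no capture stream remains open, mirroring the open path that disabled it. A compilable model of the bookkeeping (plain array in place of the locked bus stream list)::

  #include <stdbool.h>
  #include <stdio.h>

  struct stream { bool opened; bool capture; };

  static bool any_open_capture(const struct stream *s, int n)
  {
          for (int i = 0; i < n; i++)
                  if (s[i].opened && s[i].capture)
                          return true;
          return false;
  }

  int main(void)
  {
          struct stream s[3] = {
                  { .opened = true,  .capture = false },
                  { .opened = true,  .capture = true  },
                  { .opened = false, .capture = true  },
          };

          s[1].opened = false;    /* close the last open capture stream */
          printf("enable L1: %s\n", any_open_capture(s, 3) ? "no" : "yes");
          return 0;
  }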
int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
/*
* Debug
*/
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_block_read(sdev, sdev->mmio_bar, offset,
panic_info, sizeof(*panic_info));
#define SOF_HDA_WAKESTS 0x0E
#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
#define SOF_HDA_RIRBSTS 0x5d
-#define SOF_HDA_VS_EM2_L1SEN BIT(13)
/* SOF_HDA_GCTL register bist */
#define SOF_HDA_GCTL_RESET BIT(0)
#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2 0x1030
+#define HDA_VS_INTEL_EM2_L1SEN BIT(13)
+
/* HIPCI */
#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
msecs_to_jiffies(sdev->boot_timeout));
if (ret == 0) {
dev_err(sdev->dev, "error: firmware boot failure\n");
- /* after this point FW_READY msg should be ignored */
- sdev->boot_complete = true;
snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
SOF_DBG_TEXT | SOF_DBG_PCI);
+ /* after this point FW_READY msg should be ignored */
+ sdev->boot_complete = true;
return -EIO;
}
snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
- int ret;
+ int ret, err = 0;
/* nothing to do for BE */
if (rtd->dai_link->no_pcm)
if (!spcm)
return -EINVAL;
- if (!spcm->prepared[substream->stream])
- return 0;
-
dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
substream->stream);
- ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (spcm->prepared[substream->stream]) {
+ ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (ret < 0)
+ err = ret;
+ }
snd_pcm_lib_free_pages(substream);
cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
- if (ret < 0)
- return ret;
-
ret = snd_sof_pcm_platform_hw_free(sdev, substream);
- if (ret < 0)
+ if (ret < 0) {
dev_err(sdev->dev, "error: platform hw free failed\n");
+ err = ret;
+ }
- return ret;
+ return err;
}
static int sof_pcm_prepare(struct snd_pcm_substream *substream)
struct sof_ipc_stream stream;
struct sof_ipc_reply reply;
bool reset_hw_params = false;
+ bool ipc_first = false;
int ret;
/* nothing to do for BE */
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ ipc_first = true;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ ipc_first = true;
reset_hw_params = true;
break;
default:
return -EINVAL;
}
- snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+ /*
+	 * The DMA and IPC sequences differ for start and stop: the STOP IPC
+	 * must be sent before the DMA is stopped.
+ */
+ if (!ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
/* send IPC to the DSP */
ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
sizeof(stream), &reply, sizeof(reply));
+ /* need to STOP DMA even if STOP IPC failed */
+ if (ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ /* free PCM if reset_hw_params is set and the STOP IPC is successful */
if (!ret && reset_hw_params)
ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
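The ipc_first flag encodes the ordering rule from the comment above: start-like triggers run the DMA before the IPC, while stop-like triggers send the IPC first and then stop the DMA even if the IPC failed. A compact model::

  #include <stdbool.h>
  #include <stdio.h>

  enum cmd { CMD_START, CMD_STOP };

  static int send_ipc(enum cmd c)     { printf("IPC %d\n", c); return 0; }
  static void trigger_dma(enum cmd c) { printf("DMA %d\n", c); }

  static int trigger(enum cmd c)
  {
          bool ipc_first = (c == CMD_STOP);
          int ret;

          if (!ipc_first)
                  trigger_dma(c);     /* start: DMA before IPC */
          ret = send_ipc(c);
          if (ipc_first)
                  trigger_dma(c);     /* stop: DMA even if IPC failed */
          return ret;
  }

  int main(void)
  {
          trigger(CMD_START);
          trigger(CMD_STOP);
          return 0;
  }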
for (j = 0; j < count; j++) {
/* match token type */
if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
- tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
continue;
/* match token id */
if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_NODIV,
- (unsigned int)~SAI_XCR1_NODIV);
+ freq ? 0 : SAI_XCR1_NODIV);
if (ret < 0)
return ret;
+ /* Assume shutdown if requested frequency is 0Hz */
+ if (!freq) {
+ /* Release mclk rate only if rate was actually set */
+ if (sai->mclk_rate) {
+ clk_rate_exclusive_put(sai->sai_mclk);
+ sai->mclk_rate = 0;
+ }
+ return 0;
+ }
+
/* If master clock is used, set parent clock now */
ret = stm32_sai_set_parent_clock(sai, freq);
if (ret)
regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
- regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
- SAI_XCR1_NODIV);
-
- /* Release mclk rate only if rate was actually set */
- if (sai->mclk_rate) {
- clk_rate_exclusive_put(sai->sai_mclk);
- sai->mclk_rate = 0;
- }
-
clk_disable_unprepare(sai->sai_ck);
spin_lock_irqsave(&sai->irq_lock, flags);
case 0x23ba: /* Playback Designs */
case 0x25ce: /* Mytek devices */
case 0x278b: /* Rotel? */
+ case 0x292b: /* Gustard/Ess based devices */
case 0x2ab6: /* T+A devices */
case 0x3842: /* EVGA */
case 0xc502: /* HiBy devices */
if (d->bLength < sizeof(*d))
return false;
- len = d->bLength < sizeof(*d) + d->bNrInPins;
+ len = sizeof(*d) + d->bNrInPins;
if (d->bLength < len)
return false;
switch (v->protocol) {
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d
+#define SVM_EXIT_RDPRU 0x08e
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
+#define EXIT_REASON_UMWAIT 67
+#define EXIT_REASON_TPAUSE 68
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_RDSEED, "RDSEED" }, \
{ EXIT_REASON_PML_FULL, "PML_FULL" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
- { EXIT_REASON_XRSTORS, "XRSTORS" }
+ { EXIT_REASON_XRSTORS, "XRSTORS" }, \
+ { EXIT_REASON_UMWAIT, "UMWAIT" }, \
+ { EXIT_REASON_TPAUSE, "TPAUSE" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
};
enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
__MAX_BPF_ATTACH_TYPE
};
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
- *
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
* restricted to raw_tracepoint bpf programs.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using the **bpf_probe_read_user()** helper here
+ * instead to read the string would require estimating the length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another use case is parsing individual process arguments or
+ * individual environment variables by navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. The same semantics as for bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
FN(sk_storage_delete), \
FN(send_signal), \
FN(tcp_gen_syncookie), \
- FN(skb_output),
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
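
The user/kernel split above matters because the legacy helpers had to guess which address space a pointer referred to. As a minimal sketch (not part of this patch; the attach point and argument register are assumptions for illustration), a kprobe program pairing the new helpers might look like:

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
	/* Assumption: the probed function's second argument is a
	 * user-space filename pointer, fetched via PT_REGS_PARM2()
	 * from bpf_tracing.h.
	 */
	const char *uptr = (const char *)PT_REGS_PARM2(ctx);
	char buf[256];
	int len;

	/* User memory now goes through the _user variant. */
	len = bpf_probe_read_user_str(buf, sizeof(buf), uptr);
	if (len <= 0)
		return 0;

	/* len includes the trailing NUL, per the doc comment above. */
	return 0;
}
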
#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
#define KVM_CAP_PMU_EVENT_FILTER 173
#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
+#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_REG_S390 0x5000000000000000ULL
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
+#define KVM_REG_RISCV 0x8000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags: Flags for the new process as listed above.
+ * All flags are valid except for CSIGNAL and
+ * CLONE_DETACHED.
+ * @pidfd: If CLONE_PIDFD is set, a pidfd will be
+ * returned in this argument.
+ * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
+ * child process will be returned in the child's
+ * memory.
+ * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
+ * the child process will be returned in the
+ * parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ * sent when the child exits.
+ * @stack: Specify the location of the stack for the
+ * child process.
+ * @stack_size: The size of the stack for the child process.
+ * @tls: If CLONE_SETTLS is set, the tls descriptor
+ * is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
*/
struct clone_args {
__aligned_u64 flags;
__aligned_u64 stack_size;
__aligned_u64 tls;
};
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
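
Since the structure is versioned by size, userspace passes sizeof(struct clone_args) alongside the struct itself. A minimal caller sketch (not part of this patch; assumes a toolchain exposing __NR_clone3 and the UAPI header):

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <sys/syscall.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static pid_t do_clone3(void)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = 0;			/* fork()-like child */
	args.exit_signal = SIGCHLD;	/* signal delivered to the parent */

	/* The kernel uses the size argument to version the struct; an
	 * older kernel rejects sizes larger than the layout it knows.
	 */
	return syscall(__NR_clone3, &args, sizeof(args));
}
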
/*
* Scheduling policies
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT)
- /* expected_attach_type is ignored for tracing progs */
- attr.attach_btf_id = attr.expected_attach_type;
+ if (attr.prog_type == BPF_PROG_TYPE_TRACING)
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ else
+ attr.prog_ifindex = load_attr->prog_ifindex;
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
}
attr.kern_version = load_attr->kern_version;
- attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
size_t insns_cnt;
const char *license;
__u32 kern_version;
- __u32 prog_ifindex;
+ union {
+ __u32 prog_ifindex;
+ __u32 attach_btf_id;
+ };
__u32 prog_btf_fd;
__u32 func_info_rec_size;
const void *func_info;
unsigned int map_flags;
};
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
#endif
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
+ __u32 attach_btf_id;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
void *priv;
bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
+ char *pin_path;
+ bool pinned;
};
struct bpf_secdata {
return true;
}
+static int build_map_pin_path(struct bpf_map *map, const char *path)
+{
+ char buf[PATH_MAX];
+ int err, len;
+
+ if (!path)
+ path = "/sys/fs/bpf";
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
+ if (len < 0)
+ return -EINVAL;
+ else if (len >= PATH_MAX)
+ return -ENAMETOOLONG;
+
+ err = bpf_map__set_pin_path(map, buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
- const Elf_Data *data, bool strict)
+ const Elf_Data *data, bool strict,
+ const char *pin_root_path)
{
const struct btf_type *var, *def, *t;
const struct btf_var_secinfo *vi;
}
map->def.value_size = sz;
map->btf_value_type_id = t->type;
+ } else if (strcmp(name, "pinning") == 0) {
+ __u32 val;
+ int err;
+
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &val))
+ return -EINVAL;
+ pr_debug("map '%s': found pinning = %u.\n",
+ map_name, val);
+
+ if (val != LIBBPF_PIN_NONE &&
+ val != LIBBPF_PIN_BY_NAME) {
+ pr_warn("map '%s': invalid pinning value %u.\n",
+ map_name, val);
+ return -EINVAL;
+ }
+ if (val == LIBBPF_PIN_BY_NAME) {
+ err = build_map_pin_path(map, pin_root_path);
+ if (err) {
+ pr_warn("map '%s': couldn't build pin path.\n",
+ map_name);
+ return err;
+ }
+ }
} else {
if (strict) {
pr_warn("map '%s': unknown field '%s'.\n",
return 0;
}
-static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
+static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
+ const char *pin_root_path)
{
const struct btf_type *sec = NULL;
int nr_types, i, vlen, err;
for (i = 0; i < vlen; i++) {
err = bpf_object__init_user_btf_map(obj, sec, i,
obj->efile.btf_maps_shndx,
- data, strict);
+ data, strict, pin_root_path);
if (err)
return err;
}
return 0;
}
-static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps)
+static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
+ const char *pin_root_path)
{
bool strict = !relaxed_maps;
int err;
if (err)
return err;
- err = bpf_object__init_user_btf_maps(obj, strict);
+ err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
if (err)
return err;
return 0;
}
-static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps)
+static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
+ const char *pin_root_path)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
}
}
- if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
+ if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
pr_warn("Corrupted ELF file: index of strtab invalid\n");
return -LIBBPF_ERRNO__FORMAT;
}
err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
if (!err)
- err = bpf_object__init_maps(obj, relaxed_maps);
+ err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
if (!err)
err = bpf_object__sanitize_and_load_btf(obj);
if (!err)
return -errno;
new_fd = open("/", O_RDONLY | O_CLOEXEC);
- if (new_fd < 0)
+ if (new_fd < 0) {
+ err = -errno;
goto err_free_new_name;
+ }
new_fd = dup3(fd, new_fd, O_CLOEXEC);
- if (new_fd < 0)
+ if (new_fd < 0) {
+ err = -errno;
goto err_close_new_fd;
+ }
err = zclose(map->fd);
- if (err)
+ if (err) {
+ err = -errno;
goto err_close_new_fd;
+ }
free(map->name);
map->fd = new_fd;
close(new_fd);
err_free_new_name:
free(new_name);
- return -errno;
+ return err;
}
int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
return 0;
}
+static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
+{
+ struct bpf_map_info map_info = {};
+ char msg[STRERR_BUFSIZE];
+ __u32 map_info_len;
+
+ map_info_len = sizeof(map_info);
+
+ if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
+ pr_warn("failed to get map info for map FD %d: %s\n",
+ map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
+ return false;
+ }
+
+ return (map_info.type == map->def.type &&
+ map_info.key_size == map->def.key_size &&
+ map_info.value_size == map->def.value_size &&
+ map_info.max_entries == map->def.max_entries &&
+ map_info.map_flags == map->def.map_flags);
+}
+
+static int
+bpf_object__reuse_map(struct bpf_map *map)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int err, pin_fd;
+
+ pin_fd = bpf_obj_get(map->pin_path);
+ if (pin_fd < 0) {
+ err = -errno;
+ if (err == -ENOENT) {
+ pr_debug("found no pinned map to reuse at '%s'\n",
+ map->pin_path);
+ return 0;
+ }
+
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("couldn't retrieve pinned map '%s': %s\n",
+ map->pin_path, cp);
+ return err;
+ }
+
+ if (!map_is_reuse_compat(map, pin_fd)) {
+ pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
+ map->pin_path);
+ close(pin_fd);
+ return -EINVAL;
+ }
+
+ err = bpf_map__reuse_fd(map, pin_fd);
+ if (err) {
+ close(pin_fd);
+ return err;
+ }
+ map->pinned = true;
+ pr_debug("reused pinned map at '%s'\n", map->pin_path);
+
+ return 0;
+}
+
static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
char *cp, errmsg[STRERR_BUFSIZE];
int *pfd = &map->fd;
+ if (map->pin_path) {
+ err = bpf_object__reuse_map(map);
+ if (err) {
+ pr_warn("error reusing pinned map %s\n",
+ map->name);
+ return err;
+ }
+ }
+
if (map->fd >= 0) {
pr_debug("skip map create (preset) %s: fd=%d\n",
map->name, map->fd);
}
}
+ if (map->pin_path && !map->pinned) {
+ err = bpf_map__pin(map, NULL);
+ if (err) {
+ pr_warn("failed to auto-pin map name '%s' at '%s'\n",
+ map->name, map->pin_path);
+ return err;
+ }
+ }
+
pr_debug("created map %s: fd=%d\n", map->name, *pfd);
}
load_attr.line_info_cnt = prog->line_info_cnt;
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ load_attr.attach_btf_id = prog->attach_btf_id;
retry_load:
log_buf = malloc(log_buf_size);
return 0;
}
+static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
+
static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts)
{
+ const char *pin_root_path;
struct bpf_program *prog;
struct bpf_object *obj;
const char *obj_name;
obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+ pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
CHECK_ERR(bpf_object__probe_caps(obj), err, out);
- CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
+ CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
+ err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
bpf_object__elf_finish(obj);
bpf_object__for_each_program(prog, obj) {
enum bpf_prog_type prog_type;
enum bpf_attach_type attach_type;
+ __u32 btf_id;
err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
&attach_type);
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog, attach_type);
+ if (prog_type == BPF_PROG_TYPE_TRACING) {
+ err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id);
+ if (err)
+ goto out;
+ prog->attach_btf_id = btf_id;
+ }
}
return obj;
return bpf_object__load_xattr(&attr);
}
+static int make_parent_dir(const char *path)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ char *dname, *dir;
+ int err = 0;
+
+ dname = strdup(path);
+ if (dname == NULL)
+ return -ENOMEM;
+
+ dir = dirname(dname);
+ if (mkdir(dir, 0700) && errno != EEXIST)
+ err = -errno;
+
+ free(dname);
+ if (err) {
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("failed to mkdir %s: %s\n", path, cp);
+ }
+ return err;
+}
+
static int check_path(const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
char *cp, errmsg[STRERR_BUFSIZE];
int err;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+
err = check_path(path);
if (err)
return err;
return 0;
}
-static int make_dir(const char *path)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- int err = 0;
-
- if (mkdir(path, 0700) && errno != EEXIST)
- err = -errno;
-
- if (err) {
- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
- pr_warn("failed to mkdir %s: %s\n", path, cp);
- }
- return err;
-}
-
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
int i, err;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+
err = check_path(path);
if (err)
return err;
return bpf_program__pin_instance(prog, path, 0);
}
- err = make_dir(path);
- if (err)
- return err;
-
for (i = 0; i < prog->instances.nr; i++) {
char buf[PATH_MAX];
int len;
char *cp, errmsg[STRERR_BUFSIZE];
int err;
- err = check_path(path);
- if (err)
- return err;
-
if (map == NULL) {
pr_warn("invalid map pointer\n");
return -EINVAL;
}
- if (bpf_obj_pin(map->fd, path)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("failed to pin map: %s\n", cp);
- return -errno;
+ if (map->pin_path) {
+ if (path && strcmp(path, map->pin_path)) {
+ pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
+ bpf_map__name(map), map->pin_path, path);
+ return -EINVAL;
+ } else if (map->pinned) {
+ pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
+ bpf_map__name(map), map->pin_path);
+ return 0;
+ }
+ } else {
+ if (!path) {
+ pr_warn("missing a path to pin map '%s' at\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ } else if (map->pinned) {
+ pr_warn("map '%s' already pinned\n", bpf_map__name(map));
+ return -EEXIST;
+ }
+
+ map->pin_path = strdup(path);
+ if (!map->pin_path) {
+ err = -errno;
+ goto out_err;
+ }
+ }
+
+ err = make_parent_dir(map->pin_path);
+ if (err)
+ return err;
+
+ err = check_path(map->pin_path);
+ if (err)
+ return err;
+
+ if (bpf_obj_pin(map->fd, map->pin_path)) {
+ err = -errno;
+ goto out_err;
}
- pr_debug("pinned map '%s'\n", path);
+ map->pinned = true;
+ pr_debug("pinned map '%s'\n", map->pin_path);
return 0;
+
+out_err:
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("failed to pin map: %s\n", cp);
+ return err;
}
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
int err;
- err = check_path(path);
- if (err)
- return err;
-
if (map == NULL) {
pr_warn("invalid map pointer\n");
return -EINVAL;
}
+ if (map->pin_path) {
+ if (path && strcmp(path, map->pin_path)) {
+ pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
+ bpf_map__name(map), map->pin_path, path);
+ return -EINVAL;
+ }
+ path = map->pin_path;
+ } else if (!path) {
+ pr_warn("no path to unpin map '%s' from\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ }
+
+ err = check_path(path);
+ if (err)
+ return err;
+
err = unlink(path);
if (err != 0)
return -errno;
- pr_debug("unpinned map '%s'\n", path);
+
+ map->pinned = false;
+ pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
return 0;
}
+int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
+{
+ char *new = NULL;
+
+ if (path) {
+ new = strdup(path);
+ if (!new)
+ return -errno;
+ }
+
+ free(map->pin_path);
+ map->pin_path = new;
+ return 0;
+}
+
+const char *bpf_map__get_pin_path(const struct bpf_map *map)
+{
+ return map->pin_path;
+}
+
+bool bpf_map__is_pinned(const struct bpf_map *map)
+{
+ return map->pinned;
+}
+
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
struct bpf_map *map;
return -ENOENT;
}
- err = make_dir(path);
- if (err)
- return err;
-
bpf_object__for_each_map(map, obj) {
+ char *pin_path = NULL;
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_maps;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
- goto err_unpin_maps;
+ if (path) {
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
+ if (len < 0) {
+ err = -EINVAL;
+ goto err_unpin_maps;
+ } else if (len >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto err_unpin_maps;
+ }
+ pin_path = buf;
+ } else if (!map->pin_path) {
+ continue;
}
- err = bpf_map__pin(map, buf);
+ err = bpf_map__pin(map, pin_path);
if (err)
goto err_unpin_maps;
}
err_unpin_maps:
while ((map = bpf_map__prev(map, obj))) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
+ if (!map->pin_path)
continue;
- bpf_map__unpin(map, buf);
+ bpf_map__unpin(map, NULL);
}
return err;
return -ENOENT;
bpf_object__for_each_map(map, obj) {
+ char *pin_path = NULL;
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ if (path) {
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
+ if (len < 0)
+ return -EINVAL;
+ else if (len >= PATH_MAX)
+ return -ENAMETOOLONG;
+ pin_path = buf;
+ } else if (!map->pin_path) {
+ continue;
+ }
- err = bpf_map__unpin(map, buf);
+ err = bpf_map__unpin(map, pin_path);
if (err)
return err;
}
return -ENOENT;
}
- err = make_dir(path);
- if (err)
- return err;
-
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
int len;
for (i = 0; i < obj->nr_maps; i++) {
zfree(&obj->maps[i].name);
+ zfree(&obj->maps[i].pin_path);
if (obj->maps[i].clear_priv)
obj->maps[i].clear_priv(&obj->maps[i],
obj->maps[i].priv);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog)
BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
/* Programs that use BTF to identify attach point */
-#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0)
+#define BPF_PROG_BTF(string, ptype, eatype) \
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
/* Programs that can be attached but attach type can't be identified by section
* name. Kept for backward compatibility.
BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
- BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
+ BPF_TRACE_RAW_TP),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
continue;
*prog_type = section_names[i].prog_type;
*expected_attach_type = section_names[i].expected_attach_type;
- if (section_names[i].is_attach_btf) {
- struct btf *btf = bpf_core_find_kernel_btf();
- char raw_tp_btf_name[128] = "btf_trace_";
- char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1;
- int ret;
-
- if (IS_ERR(btf)) {
- pr_warn("vmlinux BTF is not found\n");
- return -EINVAL;
- }
- /* prepend "btf_trace_" prefix per kernel convention */
- strncat(dst, name + section_names[i].len,
- sizeof(raw_tp_btf_name) - sizeof("btf_trace_"));
- ret = btf__find_by_name(btf, raw_tp_btf_name);
- btf__free(btf);
- if (ret <= 0) {
- pr_warn("%s is not found in vmlinux BTF\n", dst);
- return -EINVAL;
- }
- *expected_attach_type = ret;
- }
return 0;
}
pr_warn("failed to guess program type based on ELF section name '%s'\n", name);
return -ESRCH;
}
+#define BTF_PREFIX "btf_trace_"
+static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
+{
+ struct btf *btf = bpf_core_find_kernel_btf();
+ char raw_tp_btf_name[128] = BTF_PREFIX;
+ char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
+ int ret, i, err = -EINVAL;
+
+ if (IS_ERR(btf)) {
+ pr_warn("vmlinux BTF is not found\n");
+ return -EINVAL;
+ }
+
+ if (!name)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (!section_names[i].is_attach_btf)
+ continue;
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+ /* prepend "btf_trace_" prefix per kernel convention */
+ strncat(dst, name + section_names[i].len,
+ sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
+ ret = btf__find_by_name(btf, raw_tp_btf_name);
+ if (ret <= 0) {
+ pr_warn("%s is not found in vmlinux BTF\n", dst);
+ goto out;
+ }
+ *btf_id = ret;
+ err = 0;
+ goto out;
+ }
+ pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
+ err = -ESRCH;
+out:
+ btf__free(btf);
+ return err;
+}
+
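
Concretely, a hypothetical program using the new section prefix would resolve as follows (sketch, not from this patch):

/* libbpf maps the "tp_btf/" prefix to BPF_PROG_TYPE_TRACING with
 * expected_attach_type BPF_TRACE_RAW_TP, then looks up
 * "btf_trace_sched_switch" in vmlinux BTF and stores the resulting
 * type id in prog->attach_btf_id for the load.
 */
SEC("tp_btf/sched_switch")
int handle_switch(u64 *ctx)
{
	return 0;
}
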
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
bool relaxed_maps;
/* process CO-RE relocations non-strictly, allowing them to fail */
bool relaxed_core_relocs;
+ /* maps that set the 'pinning' attribute in their definition will have
+ * their pin_path attribute set to a file in this directory, and be
+ * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
+ */
+ const char *pin_root_path;
};
-#define bpf_object_open_opts__last_field relaxed_core_relocs
+#define bpf_object_open_opts__last_field pin_root_path
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
LIBBPF_API struct bpf_object *
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
+
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+/* pin_maps and unpin_maps can both be called with a NULL path, in which case
+ * they will use the pin_path attribute of each map (and ignore all maps that
+ * don't have a pin_path set).
+ */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
const char *path);
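
Taken together with the pin_root_path open option, a caller-side sketch (paths and object names are illustrative, not from this patch):

DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
	.pin_root_path = "/sys/fs/bpf/myapp",
);
struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);

/* On load, any map that declared __uint(pinning, LIBBPF_PIN_BY_NAME)
 * is reused from, or auto-pinned to, /sys/fs/bpf/myapp/<map_name>.
 */
if (!libbpf_get_error(obj) && !bpf_object__load(obj)) {
	/* ... attach programs, run ... */
}
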
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
/*
* No need for __attribute__((packed)), all members of 'bpf_map_def'
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
+LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
LIBBPF_0.0.6 {
global:
+ bpf_map__get_pin_path;
+ bpf_map__is_pinned;
+ bpf_map__set_pin_path;
bpf_object__open_file;
bpf_object__open_mem;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
+ bpf_program__is_tracing;
+ bpf_program__set_tracing;
} LIBBPF_0.0.5;
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_TRACING:
default:
break;
}
int fd;
};
+/* Up until and including Linux 5.3 */
+struct xdp_ring_offset_v1 {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+/* Up until and including Linux 5.3 */
+struct xdp_mmap_offsets_v1 {
+ struct xdp_ring_offset_v1 rx;
+ struct xdp_ring_offset_v1 tx;
+ struct xdp_ring_offset_v1 fr;
+ struct xdp_ring_offset_v1 cr;
+};
+
int xsk_umem__fd(const struct xsk_umem *umem)
{
return umem ? umem->fd : -EINVAL;
return 0;
}
+static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
+{
+ struct xdp_mmap_offsets_v1 off_v1;
+
+ /* getsockopt on a kernel <= 5.3 has no flags fields.
+ * Copy over the offsets to the correct places in the >=5.4 format
+ * and put the flags where they would have been on that kernel.
+ */
+ memcpy(&off_v1, off, sizeof(off_v1));
+
+ off->rx.producer = off_v1.rx.producer;
+ off->rx.consumer = off_v1.rx.consumer;
+ off->rx.desc = off_v1.rx.desc;
+ off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
+
+ off->tx.producer = off_v1.tx.producer;
+ off->tx.consumer = off_v1.tx.consumer;
+ off->tx.desc = off_v1.tx.desc;
+ off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
+
+ off->fr.producer = off_v1.fr.producer;
+ off->fr.consumer = off_v1.fr.consumer;
+ off->fr.desc = off_v1.fr.desc;
+ off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
+
+ off->cr.producer = off_v1.cr.producer;
+ off->cr.consumer = off_v1.cr.consumer;
+ off->cr.desc = off_v1.cr.desc;
+ off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
+}
+
+static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
+{
+ socklen_t optlen;
+ int err;
+
+ optlen = sizeof(*off);
+ err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
+ if (err)
+ return err;
+
+ if (optlen == sizeof(*off))
+ return 0;
+
+ if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
+ xsk_mmap_offsets_v1(off);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
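
For comparison, the layout the conversion fills in is the current four-field UAPI struct from <linux/if_xdp.h>, where each ring gained a trailing flags offset in 5.4:

struct xdp_ring_offset {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
	__u64 flags;
};

struct xdp_mmap_offsets {
	struct xdp_ring_offset rx;
	struct xdp_ring_offset tx;
	struct xdp_ring_offset fr;	/* fill ring */
	struct xdp_ring_offset cr;	/* completion ring */
};
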
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
__u64 size, struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
struct xdp_mmap_offsets off;
struct xdp_umem_reg mr;
struct xsk_umem *umem;
- socklen_t optlen;
void *map;
int err;
goto out_socket;
}
- optlen = sizeof(off);
- err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(umem->fd, &off);
if (err) {
err = -errno;
goto out_socket;
struct sockaddr_xdp sxdp = {};
struct xdp_mmap_offsets off;
struct xsk_socket *xsk;
- socklen_t optlen;
int err;
if (!umem || !xsk_ptr || !rx || !tx)
}
}
- optlen = sizeof(off);
- err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(xsk->fd, &off);
if (err) {
err = -errno;
goto out_socket;
int xsk_umem__delete(struct xsk_umem *umem)
{
struct xdp_mmap_offsets off;
- socklen_t optlen;
int err;
if (!umem)
if (umem->refcount)
return -EBUSY;
- optlen = sizeof(off);
- err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(umem->fd, &off);
if (!err) {
munmap(umem->fill->ring - off.fr.desc,
off.fr.desc + umem->config.fill_size * sizeof(__u64));
{
size_t desc_sz = sizeof(struct xdp_desc);
struct xdp_mmap_offsets off;
- socklen_t optlen;
int err;
if (!xsk)
close(xsk->prog_fd);
}
- optlen = sizeof(off);
- err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(xsk->fd, &off);
if (!err) {
if (xsk->rx) {
munmap(xsk->rx->ring - off.rx.desc,
bool add_sym = false;
bool add_dso = false;
bool add_src = false;
+ int ret = 0;
if (!buf)
return -ENOMEM;
add_dso = true;
} else if (strcmp(tok, "offset")) {
pr_err("unrecognized sort token: %s\n", tok);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
}
add_sym ? "symbol," : "",
add_dso ? "dso," : "",
add_src ? "cl_srcline," : "",
- "node") < 0)
- return -ENOMEM;
+ "node") < 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
c2c.show_src = add_src;
-
+err:
free(buf);
- return 0;
+ return ret;
}
static int setup_coalesce(const char *coalesce, bool no_source)
new = realloc(new_flags, len + strlen(cpt) + 2);
if (new == NULL) {
free(new_flags);
+ free(orig_flags);
return NULL;
}
jvmti-y += jvmti_agent.o
# For strlcpy
-jvmti-y += libstring.o
+jvmti-y += libstring.o libctype.o
CFLAGS_jvmti = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
CFLAGS_REMOVE_jvmti = -Wmissing-declarations
$(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)jvmti/libctype.o: ../lib/ctype.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
dso->bpf_prog.id);
if (!info_node) {
- return SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
goto out;
}
info_linear = info_node->info_linear;
if (tofd < 0)
goto out;
- if (fchmod(tofd, mode))
- goto out_close_to;
-
if (st.st_size == 0) { /* /proc? do it slowly... */
err = slow_copyfile(from, tmp, nsi);
+ if (!err && fchmod(tofd, mode))
+ err = -1;
goto out_close_to;
}
+ if (fchmod(tofd, mode))
+ goto out_close_to;
+
nsinfo__mountns_enter(nsi, &nsc);
fromfd = open(from, O_RDONLY);
nsinfo__mountns_exit(&nsc);
is_open = false;
if (c2->leader == leader) {
if (is_open)
- perf_evsel__close(&evsel->core);
+ perf_evsel__close(&c2->core);
c2->leader = c2;
c2->core.nr_members = 0;
}
continue;
if (WARN_ONCE(cnt >= size,
- "failed to write MEM_TOPOLOGY, way too many nodes\n"))
+ "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
+ closedir(dir);
return -1;
+ }
ret = memory_node__read(&nodes[cnt++], idx);
}
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
- if (!match_pat(d->d_name, pat))
- return -2;
+ if (!match_pat(d->d_name, pat)) {
+ ret = -2;
+ break;
+ }
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
$(OUTPUT)/urandom_read: urandom_read.c
$(CC) -o $@ $< -Wl,--build-id
+$(OUTPUT)/test_stub.o: test_stub.c
+ $(CC) -c $(CFLAGS) -o $@ $<
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
endef
+# Determine target endianness.
+IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
+MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
-BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
+BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I. -I./include/uapi -I$(APIDIR) \
-I$(BPFDIR) -I$(abspath $(OUTPUT)/../usr/include)
# Define test_progs BPF-GCC-flavored test runner.
ifneq ($(BPF_GCC),)
-IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
- grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
-MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
-
TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE
-TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc) $(MENDIAN)
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc)
TRUNNER_BPF_LDFLAGS :=
$(eval $(call DEFINE_TEST_RUNNER,test_progs,bpf_gcc))
endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <test_progs.h>
+
+__u32 get_map_id(struct bpf_object *obj, const char *name)
+{
+ struct bpf_map_info map_info = {};
+ __u32 map_info_len, duration = 0;
+ struct bpf_map *map;
+ int err;
+
+ map_info_len = sizeof(map_info);
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (CHECK(!map, "find map", "NULL map"))
+ return 0;
+
+ err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
+ &map_info, &map_info_len);
+ CHECK(err, "get map info", "err %d errno %d", err, errno);
+ return map_info.id;
+}
+
+void test_pinning(void)
+{
+ const char *file_invalid = "./test_pinning_invalid.o";
+ const char *custpinpath = "/sys/fs/bpf/custom/pinmap";
+ const char *nopinpath = "/sys/fs/bpf/nopinmap";
+ const char *nopinpath2 = "/sys/fs/bpf/nopinmap2";
+ const char *custpath = "/sys/fs/bpf/custom";
+ const char *pinpath = "/sys/fs/bpf/pinmap";
+ const char *file = "./test_pinning.o";
+ __u32 map_id, map_id2, duration = 0;
+ struct stat statbuf = {};
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .pin_root_path = custpath,
+ );
+
+ /* check that opening fails with invalid pinning value in map def */
+ obj = bpf_object__open_file(file_invalid, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ /* open the valid object file */
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that pinmap was pinned */
+ err = stat(pinpath, &statbuf);
+ if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap was *not* pinned */
+ err = stat(nopinpath, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap2 was *not* pinned */
+ err = stat(nopinpath2, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ map_id = get_map_id(obj, "pinmap");
+ if (!map_id)
+ goto out;
+
+ bpf_object__close(obj);
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK_FAIL(libbpf_get_error(obj))) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that same map ID was reused for second load */
+ map_id2 = get_map_id(obj, "pinmap");
+ if (CHECK(map_id != map_id2, "check reuse",
+ "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))
+ goto out;
+
+ /* should be no-op to re-pin same map */
+ map = bpf_object__find_map_by_name(obj, "pinmap");
+ if (CHECK(!map, "find map", "NULL map"))
+ goto out;
+
+ err = bpf_map__pin(map, NULL);
+ if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* but error to pin at different location */
+ err = bpf_map__pin(map, "/sys/fs/bpf/other");
+ if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* unpin maps with a pin_path set */
+ err = bpf_object__unpin_maps(obj, NULL);
+ if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* and re-pin them... */
+ err = bpf_object__pin_maps(obj, NULL);
+ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* set pinning path of other map and re-pin all */
+ map = bpf_object__find_map_by_name(obj, "nopinmap");
+ if (CHECK(!map, "find map", "NULL map"))
+ goto out;
+
+ err = bpf_map__set_pin_path(map, custpinpath);
+ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* should only pin the one unpinned map */
+ err = bpf_object__pin_maps(obj, NULL);
+ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap was pinned at the custom path */
+ err = stat(custpinpath, &statbuf);
+ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* remove the custom pin path to re-test it with auto-pinning below */
+ err = unlink(custpinpath);
+ if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ err = rmdir(custpath);
+ if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
+ goto out;
+
+ bpf_object__close(obj);
+
+ /* open the valid object file again */
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ /* swap pin paths of the two maps */
+ bpf_object__for_each_map(map, obj) {
+ if (!strcmp(bpf_map__name(map), "nopinmap"))
+ err = bpf_map__set_pin_path(map, pinpath);
+ else if (!strcmp(bpf_map__name(map), "pinmap"))
+ err = bpf_map__set_pin_path(map, NULL);
+ else
+ continue;
+
+ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
+ goto out;
+ }
+
+ /* should fail because of map parameter mismatch */
+ err = bpf_object__load(obj);
+ if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ bpf_object__close(obj);
+
+ /* test auto-pinning at custom path with open opt */
+ obj = bpf_object__open_file(file, &opts);
+ if (CHECK_FAIL(libbpf_get_error(obj))) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that pinmap was pinned at the custom path */
+ err = stat(custpinpath, &statbuf);
+ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+out:
+ unlink(pinpath);
+ unlink(nopinpath);
+ unlink(nopinpath2);
+ unlink(custpinpath);
+ rmdir(custpath);
+ if (obj)
+ bpf_object__close(obj);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_probe_user(void)
+{
+#define kprobe_name "__sys_connect"
+ const char *prog_name = "kprobe/" kprobe_name;
+ const char *obj_file = "./test_probe_user.o";
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
+ int err, results_map_fd, sock_fd, duration = 0;
+ struct sockaddr curr, orig, tmp;
+ struct sockaddr_in *in = (struct sockaddr_in *)&curr;
+ struct bpf_link *kprobe_link = NULL;
+ struct bpf_program *kprobe_prog;
+ struct bpf_object *obj;
+ static const int zero = 0;
+
+ obj = bpf_object__open_file(obj_file, &opts);
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!kprobe_prog, "find_probe",
+ "prog '%s' not found\n", prog_name))
+ goto cleanup;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto cleanup;
+
+ results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss");
+ if (CHECK(results_map_fd < 0, "find_bss_map",
+ "err %d\n", results_map_fd))
+ goto cleanup;
+
+ kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false,
+ kprobe_name);
+ if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
+ "err %ld\n", PTR_ERR(kprobe_link))) {
+ kprobe_link = NULL;
+ goto cleanup;
+ }
+
+ memset(&curr, 0, sizeof(curr));
+ in->sin_family = AF_INET;
+ in->sin_port = htons(5555);
+ in->sin_addr.s_addr = inet_addr("255.255.255.255");
+ memcpy(&orig, &curr, sizeof(curr));
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd))
+ goto cleanup;
+
+ connect(sock_fd, &curr, sizeof(curr));
+ close(sock_fd);
+
+ err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp);
+ if (CHECK(err, "get_kprobe_res",
+ "failed to get kprobe res: %d\n", err))
+ goto cleanup;
+
+ in = (struct sockaddr_in *)&tmp;
+ if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res",
+ "wrong kprobe res from probe read: %s:%u\n",
+ inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
+ goto cleanup;
+
+ memset(&tmp, 0xab, sizeof(tmp));
+
+ in = (struct sockaddr_in *)&curr;
+ if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res",
+ "wrong kprobe res from probe write: %s:%u\n",
+ inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
+ goto cleanup;
+cleanup:
+ bpf_link__destroy(kprobe_link);
+ bpf_object__close(obj);
+}
func = ptr->func;
}));
- bpf_probe_read(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
+ bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
pkt_type &= 7;
/* read eth proto */
- bpf_probe_read(&pkt_data, sizeof(pkt_data), data + 12);
+ bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);
bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
bpf_printk("skb->len %d users %d pkt_type %x\n",
void* thread_state;
int key;
- bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr);
- bpf_probe_read(&thread_state, sizeof(thread_state),
- tls_base + 0x310 + key * 0x10 + 0x08);
+ bpf_probe_read_user(&key, sizeof(key), (void*)(long)pidData->tls_key_addr);
+ bpf_probe_read_user(&thread_state, sizeof(thread_state),
+ tls_base + 0x310 + key * 0x10 + 0x08);
return thread_state;
}
FrameData *frame, Symbol *symbol)
{
// read data from PyFrameObject
- bpf_probe_read(&frame->f_back,
- sizeof(frame->f_back),
- frame_ptr + pidData->offsets.PyFrameObject_back);
- bpf_probe_read(&frame->f_code,
- sizeof(frame->f_code),
- frame_ptr + pidData->offsets.PyFrameObject_code);
+ bpf_probe_read_user(&frame->f_back,
+ sizeof(frame->f_back),
+ frame_ptr + pidData->offsets.PyFrameObject_back);
+ bpf_probe_read_user(&frame->f_code,
+ sizeof(frame->f_code),
+ frame_ptr + pidData->offsets.PyFrameObject_code);
// read data from PyCodeObject
if (!frame->f_code)
return false;
- bpf_probe_read(&frame->co_filename,
- sizeof(frame->co_filename),
- frame->f_code + pidData->offsets.PyCodeObject_filename);
- bpf_probe_read(&frame->co_name,
- sizeof(frame->co_name),
- frame->f_code + pidData->offsets.PyCodeObject_name);
+ bpf_probe_read_user(&frame->co_filename,
+ sizeof(frame->co_filename),
+ frame->f_code + pidData->offsets.PyCodeObject_filename);
+ bpf_probe_read_user(&frame->co_name,
+ sizeof(frame->co_name),
+ frame->f_code + pidData->offsets.PyCodeObject_name);
// read actual names into symbol
if (frame->co_filename)
- bpf_probe_read_str(&symbol->file,
- sizeof(symbol->file),
- frame->co_filename + pidData->offsets.String_data);
+ bpf_probe_read_user_str(&symbol->file,
+ sizeof(symbol->file),
+ frame->co_filename +
+ pidData->offsets.String_data);
if (frame->co_name)
- bpf_probe_read_str(&symbol->name,
- sizeof(symbol->name),
- frame->co_name + pidData->offsets.String_data);
+ bpf_probe_read_user_str(&symbol->name,
+ sizeof(symbol->name),
+ frame->co_name +
+ pidData->offsets.String_data);
return true;
}
event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0);
void* thread_state_current = (void*)0;
- bpf_probe_read(&thread_state_current,
- sizeof(thread_state_current),
- (void*)(long)pidData->current_state_addr);
+ bpf_probe_read_user(&thread_state_current,
+ sizeof(thread_state_current),
+ (void*)(long)pidData->current_state_addr);
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
void* tls_base = (void*)task;
if (pidData->use_tls) {
uint64_t pthread_created;
uint64_t pthread_self;
- bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10);
+ bpf_probe_read_user(&pthread_self, sizeof(pthread_self),
+ tls_base + 0x10);
- bpf_probe_read(&pthread_created,
- sizeof(pthread_created),
- thread_state + pidData->offsets.PyThreadState_thread);
+ bpf_probe_read_user(&pthread_created,
+ sizeof(pthread_created),
+ thread_state +
+ pidData->offsets.PyThreadState_thread);
event->pthread_match = pthread_created == pthread_self;
} else {
event->pthread_match = 1;
Symbol sym = {};
int cur_cpu = bpf_get_smp_processor_id();
- bpf_probe_read(&frame_ptr,
- sizeof(frame_ptr),
- thread_state + pidData->offsets.PyThreadState_frame);
+ bpf_probe_read_user(&frame_ptr,
+ sizeof(frame_ptr),
+ thread_state +
+ pidData->offsets.PyThreadState_frame);
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
/*
* having volatile doesn't change anything on BPF side, but clang
* emits warnings for passing `volatile const char *` into
- * bpf_probe_read_str that expects just `const char *`
+ * bpf_probe_read_user_str that expects just `const char *`
*/
const char* tag;
/*
dtv_t *dtv;
void *tls_ptr;
- bpf_probe_read(&tls_index, sizeof(struct tls_index),
- (void *)loc->offset);
+ bpf_probe_read_user(&tls_index, sizeof(struct tls_index),
+ (void *)loc->offset);
/* valid module index is always positive */
if (tls_index.module > 0) {
/* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */
- bpf_probe_read(&dtv, sizeof(dtv),
- &((struct tcbhead *)tls_base)->dtv);
+ bpf_probe_read_user(&dtv, sizeof(dtv),
+ &((struct tcbhead *)tls_base)->dtv);
dtv += tls_index.module;
} else {
dtv = NULL;
}
- bpf_probe_read(&tls_ptr, sizeof(void *), dtv);
+ bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv);
/* if pointer has (void *)-1 value, then TLS wasn't initialized yet */
return tls_ptr && tls_ptr != (void *)-1
? tls_ptr + tls_index.offset
if (!location)
return;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
data->int_vals[idx] = value->val;
if (value->header.len)
data->int_vals_set_mask |= (1 << idx);
if (!location)
return 0;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
/*
- * if bpf_probe_read_str returns error (<0), due to casting to
+ * if bpf_probe_read_user_str returns error (<0), due to casting to
 * unsigned int, it will become a big number, so the next check is
 * sufficient to check for errors AND prove to the BPF verifier that
- * bpf_probe_read_str won't return anything bigger than
+ * bpf_probe_read_user_str won't return anything bigger than
* STROBE_MAX_STR_LEN
*/
if (len > STROBE_MAX_STR_LEN)
if (!location)
return payload;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
- if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr))
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+ if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
return payload;
descr->id = map.id;
data->req_meta_valid = 1;
}
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
if (len <= STROBE_MAX_STR_LEN) {
descr->tag_len = len;
payload += len;
break;
descr->key_lens[i] = 0;
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
- map.entries[i].key);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].key);
if (len <= STROBE_MAX_STR_LEN) {
descr->key_lens[i] = len;
payload += len;
}
descr->val_lens[i] = 0;
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
- map.entries[i].val);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].val);
if (len <= STROBE_MAX_STR_LEN) {
descr->val_lens[i] = len;
payload += len;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nopinmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_NONE);
+} nopinmap2 SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, 2); /* invalid */
+} nopinmap3 SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+
+#include <netinet/in.h>
+
+#include "bpf_helpers.h"
+#include "bpf_tracing.h"
+
+static struct sockaddr_in old;
+
+SEC("kprobe/__sys_connect")
+int handle_sys_connect(struct pt_regs *ctx)
+{
+ void *ptr = (void *)PT_REGS_PARM2(ctx);
+ struct sockaddr_in new;
+
+ bpf_probe_read_user(&old, sizeof(old), ptr);
+ __builtin_memset(&new, 0xab, sizeof(new));
+ bpf_probe_write_user(ptr, &new, sizeof(new));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
#include <sys/socket.h>
#include "bpf_helpers.h"
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;})
#define TCP_ESTATS_MAGIC 0xBAADBEEF
/* This test case needs "sock" and "pt_regs" data structure.
import pprint
import random
import re
+import stat
import string
import struct
import subprocess
for f in out.split():
if f == "ports":
continue
+
p = os.path.join(path, f)
if os.path.isfile(p) and os.access(p, os.R_OK):
_, out = cmd('cat %s/%s' % (path, f))
.result = OP_EPERM,
},
{
+ .descr = "ctx:write sysctl:write read ok narrow",
+ .insns = {
+ /* u64 w = (u16)write & 1; */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write) + 2),
+#endif
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_7, 1),
+ /* return 1 - w; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/domainname",
+ .open_flags = O_WRONLY,
+ .newval = "(none)", /* same as default, should fail anyway */
+ .result = OP_EPERM,
+ },
+ {
.descr = "ctx:write sysctl:read write reject",
.insns = {
/* write = X */
# start the listener
ip netns exec ${NS_DST} bash -c \
- "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+ "nc -4 -l -p 9000 >/dev/null &"
declare -i NC_PID=$!
sleep 1
/s390x/sync_regs_test
+/s390x/memop
/x86_64/cr4_cpuid_sync_test
/x86_64/evmcs_test
/x86_64/hyperv_cpuid
/x86_64/state_test
/x86_64/sync_regs_test
/x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
/clear_dirty_log_test
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
+void nested_vmx_check_supported(void);
+
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
init_vmcs_guest_state(guest_rip, guest_rsp);
}
+void nested_vmx_check_supported(void)
+{
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+}
+
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
{
#define VCPU_ID 5
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against compiler reshuffling registers
+ * before calling a function. In this test we only need to get KVM_EXIT_IO
+ * vmexit and preserve RBX, no additional information is needed.
+ */
void guest_code(void)
{
- /*
- * use a callee-save register, otherwise the compiler
- * saves it around the call to GUEST_SYNC.
- */
- register u32 stage asm("rbx");
- for (;;) {
- GUEST_SYNC(0);
- stage++;
- asm volatile ("" : : "r" (stage));
- }
+ asm volatile("1: in %[port], %%al\n"
+ "add $0x1, %%rbx\n"
+ "jmp 1b"
+ : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
}
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
struct ucall uc;
bool done = false;
+ nested_vmx_check_supported();
+
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
{
struct kvm_vm *vm;
struct kvm_nested_state state;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
* AMD currently does not implement set_nested_state, so for now we
* just early out.
*/
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, 0);
state.flags = KVM_STATE_NESTED_RUN_PENDING;
test_nested_state_expect_einval(vm, &state);
- /*
- * TODO: When SVM support is added for KVM_SET_NESTED_STATE
- * add tests here to support it like VMX.
- */
- if (entry->ecx & CPUID_VMX)
- test_vmx_nested_state(vm);
+ test_vmx_nested_state(vm);
kvm_vm_free(vm);
return 0;
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
fi
log_test $rc 0 "Prefix route with metric on link up"
+ # explicitly check for metric changes on edge scenarios
+ run_cmd "$IP addr flush dev dummy2"
+ run_cmd "$IP addr add dev dummy2 172.16.104.0/24 metric 259"
+ run_cmd "$IP addr change dev dummy2 172.16.104.0/24 metric 260"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.0/24 dev dummy2 proto kernel scope link src 172.16.104.0 metric 260"
+ rc=$?
+ fi
+ log_test $rc 0 "Modify metric of .0/24 address"
+
+ run_cmd "$IP addr flush dev dummy2"
+ run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
+ run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+ rc=$?
+ fi
+ log_test $rc 0 "Modify metric of address with peer route"
+
$IP li del dummy1
$IP li del dummy2
cleanup
{
struct epoll_event ev;
int epfd, i, test_fd;
- uint16_t test_family;
+ int test_family;
socklen_t len;
epfd = epoll_create(1);
send_from_v4(proto);
test_fd = receive_once(epfd, proto);
+ len = sizeof(test_family);
if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
error(1, errno, "failed to read socket domain");
if (test_family != AF_INET)
]
},
{
+ "id": "e38c",
+ "name": "Add simple ct action with cookie",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct index 42 cookie deadbeef",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct zone 0 pipe.*index 42 ref.*cookie deadbeef",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
"id": "9f20",
"name": "Add ct clear action",
"category": [
]
},
{
+ "id": "0bc1",
+ "name": "Add ct clear action with cookie of max length",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct clear index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct clear pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
"id": "5bea",
"name": "Try ct with zone",
"category": [
]
},
{
+ "id": "2faa",
+ "name": "Try ct with mark + mask and cookie",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct mark 0x42/0xf0 index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct mark 66/0xf0 zone 0 pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
"id": "3991",
"name": "Add simple ct action with no_percpu flag",
"category": [
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(select_idx))
counter = upper_32_bits(counter);
-
- else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+ else if (select_idx != ARMV8_PMU_CYCLE_IDX)
counter = lower_32_bits(counter);
return counter;
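
Only PMCCNTR_EL0 (index ARMV8_PMU_CYCLE_IDX, 31 in the architecture headers) is architecturally 64 bits wide, which is why the cycle-index test above replaces the per-guest 64-bit check: every other index must be truncated regardless of guest configuration. A standalone illustration of the width handling with a hypothetical raw value, not the kernel helpers themselves::

  #include <stdint.h>
  #include <stdio.h>

  #define CYCLE_IDX 31    /* stand-in for ARMV8_PMU_CYCLE_IDX */

  /* Mirrors the selection logic above: a chained high counter reports
   * the upper word, an event counter the lower word, and the cycle
   * counter the full 64 bits. */
  static uint64_t read_counter(uint64_t raw, int idx, int is_high)
  {
          if (is_high)
                  return raw >> 32;        /* upper_32_bits() */
          if (idx != CYCLE_IDX)
                  return (uint32_t)raw;    /* lower_32_bits() */
          return raw;
  }

  int main(void)
  {
          uint64_t raw = 0x123456789ULL;   /* hypothetical value */

          printf("cycle: %#jx\n", (uintmax_t)read_counter(raw, CYCLE_IDX, 0));
          printf("event: %#jx\n", (uintmax_t)read_counter(raw, 3, 0));
          printf("high:  %#jx\n", (uintmax_t)read_counter(raw, 4, 1));
          return 0;
  }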
*/
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
- u64 counter, reg;
+ u64 counter, reg, val;
pmc = kvm_pmu_get_canonical_pmc(pmc);
if (!pmc->perf_event)
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
- if (kvm_pmu_pmc_is_chained(pmc)) {
- reg = PMEVCNTR0_EL0 + pmc->idx;
- __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
- __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
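+	/*
+	 * Only the cycle counter (ARMV8_PMU_CYCLE_IDX) is 64 bits wide;
+	 * event counters are 32 bits, and a chained pair spans two
+	 * adjacent registers, hence the upper half stored at reg + 1.
+	 */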
+ if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+ reg = PMCCNTR_EL0;
+ val = counter;
} else {
- reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
- __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+ reg = PMEVCNTR0_EL0 + pmc->idx;
+ val = lower_32_bits(counter);
}
+ __vcpu_sys_reg(vcpu, reg) = val;
+
+ if (kvm_pmu_pmc_is_chained(pmc))
+ __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
kvm_pmu_release_perf_event(pmc);
}
struct pt_regs *regs)
{
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
int idx = pmc->idx;
+ u64 period;
+
+ cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+ /*
+ * Reset the sample period to the architectural limit,
+ * i.e. the point where the counter overflows.
+ */
+ period = -(local64_read(&perf_event->count));
+
+ if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+ period &= GENMASK(31, 0);
+
+ local64_set(&perf_event->hw.period_left, 0);
+ perf_event->attr.sample_period = period;
+ perf_event->hw.sample_period = period;
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
}
+
+ cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}
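
The period computation above relies on modular arithmetic: for an unsigned counter, -(count) is exactly the number of increments until the next wrap to zero, and masking with GENMASK(31, 0) rescales that distance to a 32-bit counter. A standalone worked example with a hypothetical count::

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t count = 0xfffffff0ULL;  /* hypothetical current value */
          uint64_t period = -count;        /* increments until a 64-bit wrap */

          /* GENMASK(31, 0): rescale for a 32-bit counter */
          period &= 0xffffffffULL;
          printf("period = %#jx\n", (uintmax_t)period);  /* prints 0x10 */
          return 0;
  }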
/**
* high counter.
*/
attr.sample_period = (-counter) & GENMASK(63, 0);
+ if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+ attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow,
pmc + 1);
-
- if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
- attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
} else {
/* The initial sample period (overflow count) of an event. */
if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
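
The reorder above is the whole fix: the perf core copies attr when the event is created, so a config1 bit set after perf_event_create_kernel_counter() returned (as in the removed lines) never reached the event and the chained attribute was silently dropped. A toy model of that bug class, with hypothetical cfg/event types standing in for the perf structures::

  #include <stdio.h>
  #include <string.h>

  struct cfg   { unsigned long config1; };
  struct event { struct cfg snapshot; };

  /* Models event creation: the configuration is snapshotted here,
   * so later writes to the caller's copy have no effect. */
  static void create(struct event *ev, const struct cfg *c)
  {
          memcpy(&ev->snapshot, c, sizeof(*c));
  }

  int main(void)
  {
          struct cfg attr = { 0 };
          struct event ev;

          create(&ev, &attr);
          attr.config1 |= 1;  /* too late: the event kept its own copy */
          printf("event saw config1 = %#lx\n", ev.snapshot.config1); /* 0 */
          return 0;
  }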
static struct kvm *kvm_create_vm(unsigned long type)
{
- int r, i;
struct kvm *kvm = kvm_arch_alloc_vm();
+ int r = -ENOMEM;
+ int i;
if (!kvm)
return ERR_PTR(-ENOMEM);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
- refcount_set(&kvm->users_count, 1);
INIT_LIST_HEAD(&kvm->devices);
- r = kvm_arch_init_vm(kvm, type);
- if (r)
- goto out_err_no_disable;
-
- r = hardware_enable_all();
- if (r)
- goto out_err_no_disable;
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
- INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
-#endif
-
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
- r = -ENOMEM;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_memslots *slots = kvm_alloc_memslots();
+
if (!slots)
- goto out_err_no_srcu;
+ goto out_err_no_arch_destroy_vm;
/* Generations must be different for each address space. */
slots->generation = i;
rcu_assign_pointer(kvm->memslots[i], slots);
}
- if (init_srcu_struct(&kvm->srcu))
- goto out_err_no_srcu;
- if (init_srcu_struct(&kvm->irq_srcu))
- goto out_err_no_irq_srcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
rcu_assign_pointer(kvm->buses[i],
kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
if (!kvm->buses[i])
- goto out_err;
+ goto out_err_no_arch_destroy_vm;
}
+ refcount_set(&kvm->users_count, 1);
+ r = kvm_arch_init_vm(kvm, type);
+ if (r)
+ goto out_err_no_arch_destroy_vm;
+
+ r = hardware_enable_all();
+ if (r)
+ goto out_err_no_disable;
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+ INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
+#endif
+
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err_no_srcu;
+ if (init_srcu_struct(&kvm->irq_srcu))
+ goto out_err_no_irq_srcu;
+
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
out_err_no_srcu:
hardware_disable_all();
out_err_no_disable:
- refcount_set(&kvm->users_count, 0);
+ kvm_arch_destroy_vm(kvm);
+ WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
+out_err_no_arch_destroy_vm:
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm_get_bus(kvm, i));
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
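
The relabelled error path follows the kernel's usual goto-unwind idiom: labels sit in reverse order of initialization and are named for the step that must not be undone, so a failure at step N falls through and releases exactly what steps 1..N-1 acquired. A minimal self-contained sketch with hypothetical step_a()/step_b() helpers::

  #include <stdio.h>

  /* Hypothetical stand-ins for two initialization steps and the
   * undo of the first one. */
  static int step_a(void) { return 0; }
  static int step_b(void) { return -1; }  /* fail here to show the unwind */
  static void undo_a(void) { puts("undo_a"); }

  static int create_object(void)
  {
          int r;

          r = step_a();
          if (r)
                  goto out_err_no_a;  /* nothing initialized yet */
          r = step_b();
          if (r)
                  goto out_err_no_b;  /* undo a; b never completed */
          return 0;

  out_err_no_b:
          undo_a();
  out_err_no_a:
          return r;
  }

  int main(void)
  {
          printf("create_object() = %d\n", create_object());
          return 0;
  }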
kvm_arch_vcpu_unblocking(vcpu);
block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
- if (!vcpu_valid_wakeup(vcpu))
- shrink_halt_poll_ns(vcpu);
- else if (halt_poll_ns) {
- if (block_ns <= vcpu->halt_poll_ns)
- ;
- /* we had a long block, shrink polling */
- else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ if (!kvm_arch_no_poll(vcpu)) {
+ if (!vcpu_valid_wakeup(vcpu)) {
shrink_halt_poll_ns(vcpu);
- /* we had a short halt and our poll time is too small */
- else if (vcpu->halt_poll_ns < halt_poll_ns &&
- block_ns < halt_poll_ns)
- grow_halt_poll_ns(vcpu);
- } else
- vcpu->halt_poll_ns = 0;
+ } else if (halt_poll_ns) {
+ if (block_ns <= vcpu->halt_poll_ns)
+ ;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ } else {
+ vcpu->halt_poll_ns = 0;
+ }
+ }
trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
kvm_arch_vcpu_block_finish(vcpu);
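
For reference, the block above is an adaptive heuristic, now skipped entirely when the architecture opts out of polling. A standalone walk-through of its three branches with hypothetical numbers (the real grow/shrink factors are module parameters)::

  #include <stdio.h>

  int main(void)
  {
          long halt_poll_ns = 200000;  /* module-wide cap (example)      */
          long vcpu_poll_ns = 50000;   /* this vCPU's current window     */
          long block_ns     = 120000;  /* how long this halt really took */

          if (block_ns <= vcpu_poll_ns)
                  printf("within window: leave poll time alone\n");
          else if (vcpu_poll_ns && block_ns > halt_poll_ns)
                  printf("long block: shrink polling\n");
          else if (vcpu_poll_ns < halt_poll_ns && block_ns < halt_poll_ns)
                  printf("short halt, window too small: grow polling\n");
          return 0;
  }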