# RPM spec file (make rpm-pkg)
#
/*.spec
+/rpmbuild/
#
# Debian directory (make deb-pkg)
Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
+Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
-Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org>
+Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org>
Dmitry Baryshkov <dbaryshkov@gmail.com>
Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
+Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
+Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
+Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
+Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
+Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Ravnborg <sam@mars.ravnborg.org>
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu>
Thomas Pedersen <twp@codeaurora.org>
Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
+Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com>
+Tobias Klauser <tklauser@distanz.ch> <klto@zhaw.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@nuerscht.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@xenon.tklauser.home>
Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
Tony Luck <tony.luck@intel.com>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
VFIO User API
-------------------------------------------------------------------------------
-Please see include/linux/vfio.h for complete API documentation.
+Please see include/uapi/linux/vfio.h for complete API documentation.
VFIO bus driver API
-------------------------------------------------------------------------------
return
-ECHILD and it will be called again in ref-walk mode.
-``_weak_revalidate``
+``d_weak_revalidate``
called when the VFS needs to revalidate a "jumped" dentry. This
is called when a path-walk ends at a dentry that was not acquired
by doing a lookup in the parent directory. This includes "/",
platform devices.
- Devices behind real busses where there is a connector resource
- are represented as struct spi_device or struct i2c_device. Note
+ are represented as struct spi_device or struct i2c_client. Note
that standard UARTs are not busses so there is no struct uart_device,
although some of them may be represented by struct serdev_device.
emptied entirely into the mainline during the merge window, you can pull it
forward with a command like::
- git merge v5.2-rc1^0
-
-The "^0" will cause Git to do a fast-forward merge (which should be
-possible in this situation), thus avoiding the addition of a spurious merge
-commit.
+ git merge --ff-only v5.2-rc1
The guidelines laid out above are just that: guidelines. There will always
be situations that call out for a different solution, and these guidelines
Overview
========
-Huge pages as described at Documentation/mm/hugetlbpage.rst are typically
-preallocated for application use. These huge pages are instantiated in a
-task's address space at page fault time if the VMA indicates huge pages are
-to be used. If no huge page exists at page fault time, the task is sent
+Huge pages as described at Documentation/admin-guide/mm/hugetlbpage.rst are
+typically preallocated for application use. These huge pages are instantiated
+in a task's address space at page fault time if the VMA indicates huge pages
+are to be used. If no huge page exists at page fault time, the task is sent
a SIGBUS and often dies an unhappy death. Shortly after huge page support
was added, it was determined that it would be better to detect a shortage
of huge pages at mmap() time. The idea is that if there were not enough
also populated on boot using one of ``kernelcore``, ``movablecore`` and
``movable_node`` kernel command line parameters. See
Documentation/mm/page_migration.rst and
- Documentation/admin-guide/mm/memory_hotplug.rst for additional details.
+ Documentation/admin-guide/mm/memory-hotplug.rst for additional details.
* ``ZONE_DEVICE`` represents memory residing on devices such as PMEM and GPU.
It has different characteristics than RAM zone types and it exists to provide
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-c.yaml#
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
name: ethtool
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
name: fou
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
name: netdev
-
type: flags
name: xdp-act
+ render-max: true
entries:
-
name: basic
An XDP program can use these kfuncs to read the metadata into stack
variables for its own consumption. Or, to pass the metadata on to other
consumers, an XDP program can store it into the metadata area carried
-ahead of the packet.
+ahead of the packet. Not all packets will necessarily have the requested
+metadata available, in which case the driver returns ``-ENODATA``.
Not all kfuncs have to be implemented by the device driver; when not
-implemented, the default ones that return ``-EOPNOTSUPP`` will be used.
+implemented, the default ones that return ``-EOPNOTSUPP`` will be used
+to indicate that the device driver has not implemented this kfunc.
+
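+For example, a minimal sketch of an XDP program consuming the two kfuncs this
+document describes might look as follows (libbpf-style ``__ksym`` declarations
+are assumed; a real program would resolve them against vmlinux BTF)::
+
+    /* SPDX-License-Identifier: GPL-2.0 */
+    #include <linux/bpf.h>
+    #include <bpf/bpf_helpers.h>
+
+    /* Metadata kfuncs; unimplemented ones return -EOPNOTSUPP. */
+    extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
+                                             __u64 *timestamp) __ksym;
+    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
+                                        __u32 *hash) __ksym;
+
+    SEC("xdp")
+    int rx_meta(struct xdp_md *ctx)
+    {
+        __u64 timestamp = 0;
+        __u32 hash = 0;
+
+        /* Read metadata into stack variables; -ENODATA means this
+         * packet simply does not carry the requested metadata. */
+        if (bpf_xdp_metadata_rx_timestamp(ctx, &timestamp))
+            timestamp = 0;
+        if (bpf_xdp_metadata_rx_hash(ctx, &hash))
+            hash = 0;
+
+        return XDP_PASS;
+    }
+
+    char _license[] SEC("license") = "GPL";
+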
Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
as follows::
This dialect contains many extensions to the language [gnu-extensions]_,
and many of them are used within the kernel as a matter of course.
-There is some support for compiling the kernel with ``icc`` [icc]_ for several
-of the architectures, although at the time of writing it is not completed,
-requiring third-party patches.
-
Attributes
----------
Please refer to ``include/linux/compiler_attributes.h`` for more information.
+Rust
+----
+
+The kernel has experimental support for the Rust programming language
+[rust-language]_ under ``CONFIG_RUST``. It is compiled with ``rustc`` [rustc]_
+under ``--edition=2021`` [rust-editions]_. Editions are a way to introduce
+small changes to the language that are not backwards compatible.
+
+On top of that, some unstable features [rust-unstable-features]_ are used in
+the kernel. Unstable features may change in the future, thus it is an important
+goal to reach a point where only stable features are used.
+
+Please refer to Documentation/rust/index.rst for more information.
+
.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
.. [gcc] https://gcc.gnu.org
.. [clang] https://clang.llvm.org
-.. [icc] https://software.intel.com/en-us/c-compilers
.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
-
+.. [rust-language] https://www.rust-lang.org
+.. [rustc] https://doc.rust-lang.org/rustc/
+.. [rust-editions] https://doc.rust-lang.org/edition-guide/editions/
+.. [rust-unstable-features] https://github.com/Rust-for-Linux/linux/issues/2
reviewers sometimes get grumpy. Even in that case, though, respond
politely and address the problems they have pointed out. When sending a next
version, add a ``patch changelog`` to the cover letter or to individual patches
-explaining difference aganst previous submission (see
+explaining the differences from the previous submission (see
:ref:`the_canonical_patch_format`).
See Documentation/process/email-clients.rst for recommendations on email
needs to be handed to it. Architectures must define arch_scale_cpu_capacity()
for that purpose.
-The arm and arm64 architectures directly map this to the arch_topology driver
+The arm, arm64, and RISC-V architectures directly map this to the arch_topology driver
CPU scaling data, which is derived from the capacity-dmips-mhz CPU binding; see
Documentation/devicetree/bindings/cpu/cpu-capacity.txt.
概述
====
-Documentation/mm/hugetlbpage.rst 中描述的巨页通常是预先分配给应用程序使用的。如果VMA指
+Documentation/admin-guide/mm/hugetlbpage.rst
+中描述的巨页通常是预先分配给应用程序使用的。如果VMA指
示要使用巨页,这些巨页会在缺页异常时被实例化到任务的地址空间。如果在缺页异常
时没有巨页存在,任务就会被发送一个SIGBUS,并经常不高兴地死去。在加入巨页支
持后不久,人们决定,在mmap()时检测巨页的短缺情况会更好。这个想法是,如果
当前,Linux无法凭自身算出CPU算力,因此必须要有把这个信息传递给Linux的方式。每个架构必须为此
定义arch_scale_cpu_capacity()函数。
-arm和arm64架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
+arm、arm64和RISC-V架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
arch_topology.h的percpu变量cpu_scale),它是从capacity-dmips-mhz CPU binding中衍生计算
出来的。参见Documentation/devicetree/bindings/cpu/cpu-capacity.txt。
--- /dev/null
+=======================
+Linux UVC Gadget Driver
+=======================
+
+Overview
+--------
+The UVC Gadget driver is a driver for hardware on the *device* side of a USB
+connection. It is intended to run on a Linux system that has USB device-side
+hardware such as boards with an OTG port.
+
+On the device system, once the driver is bound it appears as a V4L2 device with
+the output capability.
+
+On the host side (once connected via USB cable), a device running the UVC Gadget
+driver *and controlled by an appropriate userspace program* should appear as a UVC
+specification compliant camera, and function appropriately with any program
+designed to handle UVC cameras. The userspace program running on the device system can
+queue image buffers from a variety of sources to be transmitted via the USB
+connection. Typically this would mean forwarding the buffers from a camera sensor
+peripheral, but the source of the buffer is entirely dependent on the userspace
+companion program.
+
+Configuring the device kernel
+-----------------------------
+The Kconfig options USB_CONFIGFS, USB_LIBCOMPOSITE, USB_CONFIGFS_F_UVC and
+USB_F_UVC must be selected to enable support for the UVC gadget.
+
+Configuring the gadget through configfs
+---------------------------------------
+The UVC Gadget expects to be configured through configfs using the UVC function.
+This allows a significant degree of flexibility, as many of a UVC device's
+settings can be controlled this way.
+
+Not all of the available attributes are described here. For a complete enumeration
+see Documentation/ABI/testing/configfs-usb-gadget-uvc.
+
+Assumptions
+~~~~~~~~~~~
+This section assumes that you have mounted configfs at `/sys/kernel/config` and
+created a gadget as `/sys/kernel/config/usb_gadget/g1`.
+
+The UVC Function
+~~~~~~~~~~~~~~~~
+
+The first step is to create the UVC function:
+
+.. code-block:: bash
+
+ # These variables will be assumed throughout the rest of the document
+ CONFIGFS="/sys/kernel/config"
+ GADGET="$CONFIGFS/usb_gadget/g1"
+ FUNCTION="$GADGET/functions/uvc.0"
+
+ mkdir -p $FUNCTION
+
+Formats and Frames
+~~~~~~~~~~~~~~~~~~
+
+You must configure the gadget by telling it which formats you support, as well
+as the frame sizes and frame intervals that are supported for each format. In
+the current implementation there is no way for the gadget to refuse to set a
+format that the host instructs it to set, so it is important that this step is
+completed *accurately* to ensure that the host never asks for a format that
+can't be provided.
+
+Formats are created under the streaming/uncompressed and streaming/mjpeg configfs
+groups, with the frame sizes created under the formats in the following
+structure:
+
+::
+
+ uvc.0 +
+ |
+ + streaming +
+ |
+ + mjpeg +
+ | |
+ | + mjpeg +
+ | |
+ | + 720p
+ | |
+ | + 1080p
+ |
+ + uncompressed +
+ |
+ + yuyv +
+ |
+ + 720p
+ |
+ + 1080p
+
+Each frame can then be configured with a width and height, plus the maximum
+buffer size required to store a single frame, and finally with the supported
+frame intervals for that format and frame size. Width and height are given in
+units of pixels, frame intervals in units of 100 ns. For example, to create the
+structure above with 2, 15 and 100 fps frame intervals for each frame size, you
+might do:
+
+.. code-block:: bash
+
+ create_frame() {
+ # Example usage:
+ # create_frame <width> <height> <group> <format name>
+
+ WIDTH=$1
+ HEIGHT=$2
+ FORMAT=$3
+ NAME=$4
+
+ wdir=$FUNCTION/streaming/$FORMAT/$NAME/${HEIGHT}p
+
+ mkdir -p $wdir
+ echo $WIDTH > $wdir/wWidth
+ echo $HEIGHT > $wdir/wHeight
+ echo $(( $WIDTH * $HEIGHT * 2 )) > $wdir/dwMaxVideoFrameBufferSize
+ cat <<EOF > $wdir/dwFrameInterval
+ 666666
+ 100000
+ 5000000
+ EOF
+ }
+
+ create_frame 1280 720 mjpeg mjpeg
+ create_frame 1920 1080 mjpeg mjpeg
+ create_frame 1280 720 uncompressed yuyv
+ create_frame 1920 1080 uncompressed yuyv
+
+The only uncompressed format currently supported is YUYV, which is detailed at
+Documentation/userspace-api/media/v4l/pixfmt-packed.yuv.rst.
+
+Color Matching Descriptors
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+It's possible to specify some colorimetry information for each format you create.
+This step is optional, and default information will be included if this step is
+skipped; those default values follow those defined in the Color Matching Descriptor
+section of the UVC specification.
+
+To create a Color Matching Descriptor, create a configfs item and set its three
+attributes to your desired settings and then link to it from the format you wish
+it to be associated with:
+
+.. code-block:: bash
+
+ # Create a new Color Matching Descriptor
+
+ mkdir $FUNCTION/streaming/color_matching/yuyv
+ pushd $FUNCTION/streaming/color_matching/yuyv
+
+ echo 1 > bColorPrimaries
+ echo 1 > bTransferCharacteristics
+ echo 4 > bMatrixCoefficients
+
+ popd
+
+ # Create a symlink to the Color Matching Descriptor from the format's config item
+ ln -s $FUNCTION/streaming/color_matching/yuyv $FUNCTION/streaming/uncompressed/yuyv
+
+For details about the valid values, consult the UVC specification. Note that a
+default color matching descriptor exists and is used by any format which does
+not have a link to a different Color Matching Descriptor. It's possible to
+change the attribute settings for the default descriptor, so bear in mind that if
+you do that you are altering the defaults for any format that does not link to
+a different one.
+
+
+Header linking
+~~~~~~~~~~~~~~
+
+The UVC specification requires that Format and Frame descriptors be preceded by
+Headers detailing things such as the number and cumulative size of the different
+Format descriptors that follow. This and similar operations are achieved in
+configfs by linking between the configfs item representing the header and the
+config items representing those other descriptors, in this manner:
+
+.. code-block:: bash
+
+ mkdir $FUNCTION/streaming/header/h
+
+ # This section links the format descriptors and their associated frames
+ # to the header
+ cd $FUNCTION/streaming/header/h
+ ln -s ../../uncompressed/yuyv
+ ln -s ../../mjpeg/mjpeg
+
+ # This section ensures that the header will be transmitted for each
+ # speed's set of descriptors. If support for a particular speed is not
+ # needed then it can be skipped here.
+ cd ../../class/fs
+ ln -s ../../header/h
+ cd ../../class/hs
+ ln -s ../../header/h
+ cd ../../class/ss
+ ln -s ../../header/h
+ cd ../../../control
+ mkdir header/h
+ ln -s header/h class/fs
+ ln -s header/h class/ss
+
+
+Extension Unit Support
+~~~~~~~~~~~~~~~~~~~~~~
+
+A UVC Extension Unit (XU) basically provides a distinct unit to which control set
+and get requests can be addressed. The meaning of those control requests is
+entirely implementation dependent, but may be used to control settings outside
+of the UVC specification (for example enabling or disabling video effects). An
+XU can be inserted into the UVC unit chain or left free-hanging.
+
+Configuring an extension unit involves creating an entry in the appropriate
+directory and setting its attributes appropriately, like so:
+
+.. code-block:: bash
+
+ mkdir $FUNCTION/control/extensions/xu.0
+ pushd $FUNCTION/control/extensions/xu.0
+
+ # Set the bUnitID of the Processing Unit as the source for this
+ # Extension Unit
+ echo 2 > baSourceID
+
+ # Set this XU as the source of the default output terminal. This inserts
+ # the XU into the UVC chain between the PU and OT such that the final
+ # chain is IT > PU > XU.0 > OT
+ cat bUnitID > ../../terminal/output/default/baSourceID
+
+ # Flag some controls as being available for use. The bmControls field is
+ # a bitmap with each bit denoting the availability of a particular
+ # control. For example, to flag the 0th, 2nd and 3rd controls as available:
+ echo 0x0d > bmControls
+
+ # Set the GUID; this is a vendor-specific code identifying the XU.
+ echo -e -n "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" > guidExtensionCode
+
+ popd
+
+The bmControls attribute and the baSourceID attribute are multi-value attributes.
+This means that you may write multiple newline-separated values to them. For
+example, to flag the 0th, 1st, 8th and 9th controls as available you would
+need to write two values to bmControls, like so:
+
+.. code-block:: bash
+
+ cat << EOF > bmControls
+ 0x03
+ 0x03
+ EOF
+
+The multi-value nature of the baSourceID attribute reflects the fact that XUs
+can be multiple-input, though note that this currently has no significant effect.
+
+The bControlSize attribute reflects the size of the bmControls attribute, and
+similarly bNrInPins reflects the size of the baSourceID attribute. Both
+attributes are automatically increased or decreased as you set bmControls and
+baSourceID. It is also possible to manually increase or decrease bControlSize,
+which has the effect of truncating entries to the new size, or padding entries
+out with 0x00, for example:
+
+::
+
+ $ cat bmControls
+ 0x03
+ 0x05
+
+ $ cat bControlSize
+ 2
+
+ $ echo 1 > bControlSize
+ $ cat bmControls
+ 0x03
+
+ $ echo 2 > bControlSize
+ $ cat bmControls
+ 0x03
+ 0x00
+
+bNrInPins and baSourceID function in the same way.
+
+Custom Strings Support
+~~~~~~~~~~~~~~~~~~~~~~
+
+String descriptors that provide a textual description for various parts of a
+USB device can be defined in the usual place within USB configfs, and may then
+be linked to from the UVC function root or from Extension Unit directories to
+assign those strings as descriptors:
+
+.. code-block:: bash
+
+ # Create a string descriptor in en-US and link to it from the function
+ # root. The name of the link is significant here, as it declares this
+ # descriptor to be intended for the Interface Association Descriptor.
+ # Other significant link names at function root are vs0_desc and vs1_desc
+ # for the VideoStreaming Interface 0/1 Descriptors.
+
+ mkdir -p $GADGET/strings/0x409/iad_desc
+ echo -n "Interface Associaton Descriptor" > $GADGET/strings/0x409/iad_desc/s
+ ln -s $GADGET/strings/0x409/iad_desc $FUNCTION/iad_desc
+
+ # Because the link to a String Descriptor from an Extension Unit clearly
+ # associates the two, the name of this link is not significant and may
+ # be set freely.
+
+ mkdir -p $GADGET/strings/0x409/xu.0
+ echo -n "A Very Useful Extension Unit" > $GADGET/strings/0x409/xu.0/s
+ ln -s $GADGET/strings/0x409/xu.0 $FUNCTION/control/extensions/xu.0
+
+The interrupt endpoint
+~~~~~~~~~~~~~~~~~~~~~~
+
+The VideoControl interface has an optional interrupt endpoint which is by default
+disabled. This is intended to support delayed response control set requests for
+UVC (which should respond through the interrupt endpoint rather than tying up
+endpoint 0). At present support for sending data through this endpoint is missing
+and so it is left disabled to avoid confusion. If you wish to enable it you can
+do so through the configfs attribute:
+
+.. code-block:: bash
+
+ echo 1 > $FUNCTION/control/enable_interrupt_ep
+
+Bandwidth configuration
+~~~~~~~~~~~~~~~~~~~~~~~
+
+There are three attributes which control the bandwidth of the USB connection.
+These live in the function root and can be set within limits:
+
+.. code-block:: bash
+
+ # streaming_interval sets bInterval. Values range from 1..255
+ echo 1 > $FUNCTION/streaming_interval
+
+ # streaming_maxpacket sets wMaxPacketSize. Valid values are 1024/2048/3072
+ echo 3072 > $FUNCTION/streaming_maxpacket
+
+ # streaming_maxburst sets bMaxBurst. Valid values are 1..15
+ echo 1 > $FUNCTION/streaming_maxburst
+
+
+The values passed here will be clamped to valid values according to the UVC
+specification (which depend on the speed of the USB connection). To understand
+how the settings influence bandwidth you should consult the UVC specifications,
+but a rule of thumb is that increasing the streaming_maxpacket setting will
+improve bandwidth (and thus the maximum possible framerate), whilst the same is
+true for streaming_maxburst provided the USB connection is running at SuperSpeed.
+Increasing streaming_interval will reduce bandwidth and framerate.
+
+The userspace application
+-------------------------
+By itself, the UVC Gadget driver cannot do anything particularly interesting. It
+must be paired with a userspace program that responds to UVC control requests and
+fills buffers to be queued to the V4L2 device that the driver creates. How those
+things are achieved is implementation dependent and beyond the scope of this
+document, but a reference application can be found at https://gitlab.freedesktop.org/camera/uvc-gadget
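+
+As a minimal illustration (not a substitute for the reference application), a
+companion program might open the gadget's V4L2 node and check that it exposes
+the output capability before queueing buffers; the ``/dev/video0`` path is an
+assumption and will vary between systems:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_capability cap;
+        int fd = open("/dev/video0", O_RDWR); /* assumed gadget node */
+
+        if (fd < 0)
+            return 1;
+
+        memset(&cap, 0, sizeof(cap));
+        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
+            (cap.device_caps & V4L2_CAP_VIDEO_OUTPUT))
+            printf("%s: UVC gadget output device\n", cap.card);
+
+        return 0;
+    }
+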
gadget_multi
gadget_printer
gadget_serial
+ gadget_uvc
gadget-testing
iuu_phoenix
mass-storage
This document describes details of the schema.
See :doc:`intro-specs` for a practical starting guide.
-All specs must be licensed under ``GPL-2.0-only OR BSD-3-Clause``
+All specs must be licensed under
+``((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)``
to allow for easy adoption in user space code.
Compatibility levels
The "pad" and "reserved" fields may be used for future extensions and should be
set to 0s by userspace.
+4.138 KVM_ARM_SET_COUNTER_OFFSET
+--------------------------------
+
+:Capability: KVM_CAP_COUNTER_OFFSET
+:Architectures: arm64
+:Type: vm ioctl
+:Parameters: struct kvm_arm_counter_offset (in)
+:Returns: 0 on success, < 0 on error
+
+This capability indicates that userspace is able to apply a single VM-wide
+offset to both the virtual and physical counters as viewed by the guest
+using the KVM_ARM_SET_COUNTER_OFFSET ioctl and the following data structure:
+
+::
+
+ struct kvm_arm_counter_offset {
+ __u64 counter_offset;
+ __u64 reserved;
+ };
+
+The offset describes a number of counter cycles that are subtracted from
+both virtual and physical counter views (similar to the effects of the
+CNTVOFF_EL2 and CNTPOFF_EL2 system registers, but only global). The offset
+always applies to all vcpus (already created or created after this ioctl)
+for this VM.
+
+It is userspace's responsibility to compute the offset based, for example,
+on previous values of the guest counters.
+
+Any value other than 0 for the "reserved" field may result in an error
+(-EINVAL) being returned. This ioctl can also return -EBUSY if any vcpu
+ioctl is issued concurrently.
+
+Note that using this ioctl results in KVM ignoring subsequent userspace
+writes to the CNTVCT_EL0 and CNTPCT_EL0 registers using the SET_ONE_REG
+interface. No error will be returned, but the resulting offset will not be
+applied.
+
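+As a sketch, userspace could apply the offset like this (assuming ``vm_fd``
+is an open VM file descriptor)::
+
+    #include <linux/kvm.h>
+    #include <sys/ioctl.h>
+
+    /* Subtract 'cycles' from the guest's view of both counters. */
+    static int set_counter_offset(int vm_fd, __u64 cycles)
+    {
+        struct kvm_arm_counter_offset off = {
+            .counter_offset = cycles,
+            .reserved = 0, /* must be zero */
+        };
+
+        return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
+    }
+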
5. The kvm_run structure
========================
__u64 nr;
__u64 args[6];
__u64 ret;
- __u32 longmode;
- __u32 pad;
+ __u64 flags;
} hypercall;
-Unused. This was once used for 'hypercall to userspace'. To implement
-such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390).
+
+It is strongly recommended that userspace use ``KVM_EXIT_IO`` (x86) or
+``KVM_EXIT_MMIO`` (all except s390) to implement functionality that
+requires a guest to interact with host userspace.
.. note:: KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
+For arm64:
+----------
+
+SMCCC exits can be enabled depending on the configuration of the SMCCC
+filter. See the Documentation/virt/kvm/devices/vm.rst
+``KVM_ARM_SMCCC_FILTER`` for more details.
+
+``nr`` contains the function ID of the guest's SMCCC call. Userspace is
+expected to use the ``KVM_GET_ONE_REG`` ioctl to retrieve the call
+parameters from the vCPU's GPRs.
+
+Definition of ``flags``:
+ - ``KVM_HYPERCALL_EXIT_SMC``: Indicates that the guest used the SMC
+ conduit to initiate the SMCCC call. If this bit is 0 then the guest
+ used the HVC conduit for the SMCCC call.
+
+ - ``KVM_HYPERCALL_EXIT_16BIT``: Indicates that the guest used a 16bit
+ instruction to initiate the SMCCC call. If this bit is 0 then the
+ guest used a 32bit instruction. An AArch64 guest always has this
+ bit set to 0.
+
+At the point of exit, PC points to the instruction immediately following
+the trapping instruction.
+
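+As a sketch, a VMM run loop could decode such an exit as follows (assuming
+``run`` points at the mmap'ed ``struct kvm_run``)::
+
+    #include <stdio.h>
+    #include <linux/kvm.h>
+
+    static void handle_arm64_hypercall(struct kvm_run *run)
+    {
+        unsigned long long func_id = run->hypercall.nr;
+        const char *conduit =
+            (run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC) ? "SMC" : "HVC";
+
+        /* The call parameters live in the vCPU GPRs and must be
+         * fetched separately with KVM_GET_ONE_REG. */
+        printf("SMCCC call 0x%llx via %s\n", func_id, conduit);
+    }
+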
::
/* KVM_EXIT_TPR_ACCESS */
will clear DR6.RTM.
7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+--------------------------------------
:Architectures: x86, arm64, mips
:Parameters: args[0] whether feature should be enabled or not
if it is enabled
:Returns: -EFAULT if the given address is not accessible from kernel space;
0 in case of success.
+
+6. GROUP: KVM_ARM_VM_SMCCC_CTRL
+===============================
+
+:Architectures: arm64
+
+6.1. ATTRIBUTE: KVM_ARM_VM_SMCCC_FILTER (w/o)
+---------------------------------------------
+
+:Parameters: Pointer to a ``struct kvm_smccc_filter``
+
+:Returns:
+
+ ====== ===========================================
+ EEXIST Range intersects with a previously inserted
+ or reserved range
+ EBUSY A vCPU in the VM has already run
+ EINVAL Invalid filter configuration
+ ENOMEM Failed to allocate memory for the in-kernel
+ representation of the SMCCC filter
+ ====== ===========================================
+
+Requests the installation of an SMCCC call filter described as follows::
+
+ enum kvm_smccc_filter_action {
+ KVM_SMCCC_FILTER_HANDLE = 0,
+ KVM_SMCCC_FILTER_DENY,
+ KVM_SMCCC_FILTER_FWD_TO_USER,
+ };
+
+ struct kvm_smccc_filter {
+ __u32 base;
+ __u32 nr_functions;
+ __u8 action;
+ __u8 pad[15];
+ };
+
+The filter is defined as a set of non-overlapping ranges. Each
+range defines an action to be applied to SMCCC calls within the range.
+Userspace can insert multiple ranges into the filter by using
+successive calls to this attribute.
+
+The default configuration of KVM is such that all implemented SMCCC
+calls are allowed. Thus, the SMCCC filter can be defined sparsely
+by userspace, only describing ranges that modify the default behavior.
+
+The range expressed by ``struct kvm_smccc_filter`` is
+[``base``, ``base + nr_functions``). The range is not allowed to wrap,
+i.e. userspace cannot rely on ``base + nr_functions`` overflowing.
+
+The SMCCC filter applies to both SMC and HVC calls initiated by the
+guest. The SMCCC filter gates the in-kernel emulation of SMCCC calls
+and as such takes effect before other interfaces that interact with
+SMCCC calls (e.g. hypercall bitmap registers).
+
+Actions:
+
+ - ``KVM_SMCCC_FILTER_HANDLE``: Allows the guest SMCCC call to be
+ handled in-kernel. It is strongly recommended that userspace *not*
+ explicitly describe the allowed SMCCC call ranges.
+
+ - ``KVM_SMCCC_FILTER_DENY``: Rejects the guest SMCCC call in-kernel
+ and returns to the guest.
+
+ - ``KVM_SMCCC_FILTER_FWD_TO_USER``: The guest SMCCC call is forwarded
+ to userspace with an exit reason of ``KVM_EXIT_HYPERCALL``.
+
+The ``pad`` field is reserved for future use and must be zero. KVM may
+return ``-EINVAL`` if the field is nonzero.
+
+KVM reserves the 'Arm Architecture Calls' range of function IDs and
+will reject attempts to define a filter for any portion of these ranges:
+
+ =========== ===============
+ Start End (inclusive)
+ =========== ===============
+ 0x8000_0000 0x8000_FFFF
+ 0xC000_0000 0xC000_FFFF
+ =========== ===============
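+
+As a sketch, userspace could forward a range of calls to itself through the
+VM device-attribute interface (``vm_fd`` is assumed to be the VM file
+descriptor)::
+
+    #include <linux/kvm.h>
+    #include <sys/ioctl.h>
+
+    static int fwd_smccc_range(int vm_fd, __u32 base, __u32 nr_functions)
+    {
+        struct kvm_smccc_filter filter = {
+            .base = base,
+            .nr_functions = nr_functions,
+            .action = KVM_SMCCC_FILTER_FWD_TO_USER,
+        };
+        struct kvm_device_attr attr = {
+            .group = KVM_ARM_VM_SMCCC_CTRL,
+            .attr = KVM_ARM_VM_SMCCC_FILTER,
+            .addr = (__u64)(unsigned long)&filter,
+        };
+
+        /* Matching calls now exit to userspace as KVM_EXIT_HYPERCALL. */
+        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
+    }
+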
- kvm->mn_active_invalidate_count ensures that pairs of
invalidate_range_start() and invalidate_range_end() callbacks
use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
- are taken on the waiting side in install_new_memslots, so MMU notifiers
+ are taken on the waiting side when modifying memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
For SRCU:
F: include/uapi/linux/dm-*.h
DEVLINK
-M: Jiri Pirko <jiri@nvidia.com>
+M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/devlink
L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
-T: git://git.infradead.org/nvme.git
+T: git git://git.infradead.org/nvme.git
F: Documentation/nvme/
-F: drivers/nvme/host/
F: drivers/nvme/common/
-F: include/linux/nvme.h
+F: drivers/nvme/host/
F: include/linux/nvme-*.h
+F: include/linux/nvme.h
F: include/uapi/linux/nvme_ioctl.h
NVM EXPRESS FABRICS AUTHENTICATION
L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
-T: git://git.infradead.org/nvme.git
+T: git git://git.infradead.org/nvme.git
F: drivers/nvme/target/
NVMEM FRAMEWORK
F: drivers/hwmon/nzxt-smart2.c
OBJAGG
-M: Jiri Pirko <jiri@nvidia.com>
+M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/objagg.h
F: include/linux/hp_sdc.h
PARMAN
-M: Jiri Pirko <jiri@nvidia.com>
+M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/parman.h
F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
F: arch/riscv/boot/dts/microchip/
F: drivers/char/hw_random/mpfs-rng.c
-F: drivers/clk/microchip/clk-mpfs.c
+F: drivers/clk/microchip/clk-mpfs*.c
F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
F: drivers/net/ethernet/sis/sis900.*
SIS FRAMEBUFFER DRIVER
-M: Thomas Winischhofer <thomas@winischhofer.net>
-S: Maintained
-W: http://www.winischhofer.net/linuxsisvga.shtml
+S: Orphan
F: Documentation/fb/sisfb.rst
F: drivers/video/fbdev/sis/
F: include/video/sisfb.h
M: Valentina Manea <valentina.manea.m@gmail.com>
M: Shuah Khan <shuah@kernel.org>
M: Shuah Khan <skhan@linuxfoundation.org>
+R: Hongren Zheng <i@zenithal.me>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/usbip_protocol.rst
VERSION = 6
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
- outputmakefile rustavailable rustfmt rustfmtcheck \
- scripts_package
+ outputmakefile rustavailable rustfmt rustfmtcheck
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
- *.spec \
+ *.spec rpmbuild \
rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
%pkg: include/config/kernel.release FORCE
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@
-PHONY += scripts_package
-scripts_package: scripts_basic
- $(Q)$(MAKE) $(build)=scripts scripts/list-gitignored
-
# Brief documentation of the typical targets used
# ---------------------------------------------------------------------------
else # KBUILD_EXTMOD
+filechk_kernel.release = echo $(KERNELRELEASE)
+
###
# External module support.
# When building external modules the kernel used as basis is considered
&usbotg1 {
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
disable-over-current;
srp-disable;
hnp-disable;
&usbotg1 {
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
disable-over-current;
srp-disable;
hnp-disable;
&usbotg1 {
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
disable-over-current;
srp-disable;
hnp-disable;
};
reserved-memory {
+ sbl_region: sbl@2f00000 {
+ reg = <0x02f00000 0x100000>;
+ no-map;
+ };
+
+ external_image_region: external-image@3100000 {
+ reg = <0x03100000 0x200000>;
+ no-map;
+ };
+
adsp_region: adsp@3300000 {
reg = <0x03300000 0x1400000>;
no-map;
tocopy = n;
ua_flags = uaccess_save_and_enable();
- memcpy((void *)to, from, tocopy);
+ __memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
tocopy = n;
ua_flags = uaccess_save_and_enable();
- memset((void *)addr, 0, tocopy);
+ __memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
};
&enetc_port2 {
- nvmem-cells = <&base_mac_address 2>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
&enetc_port3 {
- nvmem-cells = <&base_mac_address 3>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&qsgmii_phy0>;
phy-mode = "qsgmii";
- nvmem-cells = <&base_mac_address 4>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&qsgmii_phy1>;
phy-mode = "qsgmii";
- nvmem-cells = <&base_mac_address 5>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&qsgmii_phy2>;
phy-mode = "qsgmii";
- nvmem-cells = <&base_mac_address 6>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&qsgmii_phy3>;
phy-mode = "qsgmii";
- nvmem-cells = <&base_mac_address 7>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
&enetc_port1 {
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- nvmem-cells = <&base_mac_address 0>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
};
&enetc_port2 {
- nvmem-cells = <&base_mac_address 2>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
&enetc_port3 {
- nvmem-cells = <&base_mac_address 3>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&phy0>;
phy-mode = "sgmii";
- nvmem-cells = <&base_mac_address 0>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
managed = "in-band-status";
phy-handle = <&phy1>;
phy-mode = "sgmii";
- nvmem-cells = <&base_mac_address 1>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
&enetc_port1 {
phy-handle = <&phy1>;
phy-mode = "rgmii-id";
- nvmem-cells = <&base_mac_address 1>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
phy-handle = <&phy0>;
phy-mode = "sgmii";
managed = "in-band-status";
- nvmem-cells = <&base_mac_address 0>;
- nvmem-cell-names = "mac-address";
status = "okay";
};
label = "bootloader environment";
};
};
-
- otp-1 {
- compatible = "user-otp";
-
- nvmem-layout {
- compatible = "kontron,sl28-vpd";
-
- serial_number: serial-number {
- };
-
- base_mac_address: base-mac-address {
- #nvmem-cell-cells = <1>;
- };
- };
- };
};
};
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>,
<&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>;
- clock-names = "fspi", "fspi_en";
+ clock-names = "fspi_en", "fspi";
power-domains = <&pd IMX_SC_R_FSPI_0>;
status = "disabled";
};
phy-handle = <ðphy0>;
nvmem-cells = <&fec_mac1>;
nvmem-cell-names = "mac-address";
- snps,reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
- snps,reset-delays-us = <10 20 200000>;
status = "okay";
mdio {
eee-broken-1000t;
qca,disable-smarteee;
qca,disable-hibernation-mode;
+ reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <20>;
+ reset-deassert-us = <200000>;
vddio-supply = <&vddio0>;
vddio0: vddio-regulator {
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
- clock-names = "mclk1";
+ clock-names = "mclk";
wlf,shared-lrclk;
#sound-dai-cells = <0>;
};
sai2: sai@30020000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30020000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
sai3: sai@30030000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30030000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
sai5: sai@30050000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30050000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
sai6: sai@30060000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30060000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
sai7: sai@300b0000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x300b0000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
reg = <0x32e90000 0x238>;
interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT>,
- <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
- <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
+ <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
clock-names = "pix", "axi", "disp_axi";
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
<&clk IMX8MP_VIDEO_PLL1>;
lpi2c1: i2c@44340000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x44340000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
<&clk IMX93_CLK_BUS_AON>;
lpi2c2: i2c@44350000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x44350000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
<&clk IMX93_CLK_BUS_AON>;
lpi2c3: i2c@42530000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x42530000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
lpi2c4: i2c@42540000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x42540000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
lpi2c5: i2c@426b0000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x426b0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
lpi2c6: i2c@426c0000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x426c0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
lpi2c7: i2c@426d0000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x426d0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
lpi2c8: i2c@426e0000 {
compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x426e0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
eqos: ethernet@428a0000 {
compatible = "nxp,imx93-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x428a0000 0x10000>;
- interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "eth_wake_irq", "macirq";
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX93_CLK_ENET_QOS_GATE>,
<&clk IMX93_CLK_ENET_QOS_GATE>,
<&clk IMX93_CLK_ENET_TIMER2>,
<&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
assigned-clock-rates = <100000000>, <250000000>;
intf_mode = <&wakeupmix_gpr 0x28>;
- clk_csr = <0>;
+ snps,clk-csr = <0>;
status = "disabled";
};
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+ ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
apbmisc: misc@100000 {
compatible = "nvidia,tegra194-misc";
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+ ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
misc@100000 {
compatible = "nvidia,tegra234-misc";
&gpio_leds_default {
pins = "gpio81", "gpio82", "gpio83";
};
-
-&sim_ctrl_default {
- pins = "gpio1", "gpio2";
-};
gpios = <&msmgpio 20 GPIO_ACTIVE_HIGH>;
};
+&mpss {
+ pinctrl-0 = <&sim_ctrl_default>;
+ pinctrl-names = "default";
+};
+
&button_default {
pins = "gpio37";
bias-pull-down;
pins = "gpio20", "gpio21", "gpio22";
};
-&sim_ctrl_default {
- pins = "gpio1", "gpio2";
+/* This selects the external SIM card slot by default */
+&msmgpio {
+ sim_ctrl_default: sim-ctrl-default-state {
+ esim-sel-pins {
+ pins = "gpio0", "gpio3";
+ bias-disable;
+ output-low;
+ };
+
+ sim-en-pins {
+ pins = "gpio1";
+ bias-disable;
+ output-low;
+ };
+
+ sim-sel-pins {
+ pins = "gpio2";
+ bias-disable;
+ output-high;
+ };
+ };
};
};
&mpss {
- pinctrl-0 = <&sim_ctrl_default>;
- pinctrl-names = "default";
-
status = "okay";
};
drive-strength = <2>;
bias-disable;
};
-
- sim_ctrl_default: sim-ctrl-default-state {
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- output-low;
- };
};
};
&remoteproc_nsp0 {
- firmware-name = "qcom/sa8540p/cdsp.mbn";
+ firmware-name = "qcom/sa8540p/cdsp0.mbn";
status = "okay";
};
pinctrl-names = "default";
pinctrl-0 = <&pcie1_clkreq_n>;
+ dma-coherent;
+
iommus = <&apps_smmu 0x1c80 0x1>;
iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_s11b: smps11 {
regulator-min-microvolt = <1272000>;
regulator-max-microvolt = <1272000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_s12b: smps12 {
regulator-min-microvolt = <984000>;
regulator-max-microvolt = <984000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
};
vreg_l3b: ldo3 {
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-always-on;
};
};
pmic-die-temp@3 {
reg = <PMK8350_ADC7_DIE_TEMP>;
qcom,pre-scaling = <1 1>;
+ label = "pmk8350_die_temp";
};
xo-therm@44 {
reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "pmk8350_xo_therm";
};
pmic-die-temp@103 {
reg = <PM8350_ADC7_DIE_TEMP(1)>;
qcom,pre-scaling = <1 1>;
+ label = "pmc8280_1_die_temp";
};
sys-therm@144 {
reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm1";
};
sys-therm@145 {
reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm2";
};
sys-therm@146 {
reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm3";
};
sys-therm@147 {
reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm4";
};
pmic-die-temp@303 {
reg = <PM8350_ADC7_DIE_TEMP(3)>;
qcom,pre-scaling = <1 1>;
+ label = "pmc8280_2_die_temp";
};
sys-therm@344 {
reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm5";
};
sys-therm@345 {
reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm6";
};
sys-therm@346 {
reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm7";
};
sys-therm@347 {
reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
qcom,hw-settle-time = <200>;
qcom,ratiometric;
+ label = "sys_therm8";
};
pmic-die-temp@403 {
reg = <PMR735A_ADC7_DIE_TEMP>;
qcom,pre-scaling = <1 1>;
+ label = "pmr735a_die_temp";
};
};
"VA DMIC0", "MIC BIAS1",
"VA DMIC1", "MIC BIAS1",
"VA DMIC2", "MIC BIAS3",
- "TX DMIC0", "MIC BIAS1",
- "TX DMIC1", "MIC BIAS2",
- "TX DMIC2", "MIC BIAS3",
+ "VA DMIC0", "VA MIC BIAS1",
+ "VA DMIC1", "VA MIC BIAS1",
+ "VA DMIC2", "VA MIC BIAS3",
"TX SWR_ADC1", "ADC2_OUTPUT";
wcd-playback-dai-link {
va-dai-link {
link-name = "VA Capture";
cpu {
- sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
};
platform {
vdd-micb-supply = <&vreg_s10b>;
- qcom,dmic-sample-rate = <600000>;
+ qcom,dmic-sample-rate = <4800000>;
status = "okay";
};
qcom,ports-sinterval-low = /bits/ 8 <0x03 0x1f 0x1f 0x07 0x00>;
qcom,ports-offset1 = /bits/ 8 <0x00 0x00 0x0B 0x01 0x00>;
qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x0B 0x00 0x00>;
- qcom,ports-hstart = /bits/ 8 <0xff 0x03 0xff 0xff 0xff>;
- qcom,ports-hstop = /bits/ 8 <0xff 0x06 0xff 0xff 0xff>;
+ qcom,ports-hstart = /bits/ 8 <0xff 0x03 0x00 0xff 0xff>;
+ qcom,ports-hstop = /bits/ 8 <0xff 0x06 0x0f 0xff 0xff>;
qcom,ports-word-length = /bits/ 8 <0x01 0x07 0x04 0xff 0xff>;
- qcom,ports-block-pack-mode = /bits/ 8 <0xff 0x00 0x01 0xff 0xff>;
+ qcom,ports-block-pack-mode = /bits/ 8 <0xff 0xff 0x01 0xff 0xff>;
qcom,ports-lane-control = /bits/ 8 <0x01 0x00 0x00 0x00 0x00>;
- qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0x00>;
+ qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0xff>;
#sound-dai-cells = <1>;
#address-cells = <2>;
<&intc GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core", "wake";
- clocks = <&vamacro>;
+ clocks = <&txmacro>;
clock-names = "iface";
label = "TX";
#sound-dai-cells = <1>;
qcom,din-ports = <4>;
qcom,dout-ports = <0>;
- qcom,ports-sinterval-low = /bits/ 8 <0x01 0x03 0x03 0x03>;
- qcom,ports-offset1 = /bits/ 8 <0x01 0x00 0x02 0x01>;
+ qcom,ports-sinterval-low = /bits/ 8 <0x01 0x01 0x03 0x03>;
+ qcom,ports-offset1 = /bits/ 8 <0x01 0x00 0x02 0x00>;
qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x00 0x00>;
qcom,ports-block-pack-mode = /bits/ 8 <0xff 0xff 0xff 0xff>;
qcom,ports-hstart = /bits/ 8 <0xff 0xff 0xff 0xff>;
qcom,ports-hstop = /bits/ 8 <0xff 0xff 0xff 0xff>;
- qcom,ports-word-length = /bits/ 8 <0xff 0x00 0xff 0xff>;
+ qcom,ports-word-length = /bits/ 8 <0xff 0xff 0xff 0xff>;
qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff>;
- qcom,ports-lane-control = /bits/ 8 <0x00 0x01 0x00 0x00>;
+ qcom,ports-lane-control = /bits/ 8 <0x00 0x01 0x00 0x01>;
status = "disabled";
};
dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
+ status = "disabled";
};
};
clock-names = "xo";
power-domains = <&rpmpd SM6375_VDDCX>;
+ power-domain-names = "cx";
memory-region = <&pil_cdsp_mem>;
"slave_q2a",
"tbu";
- iommus = <&apps_smmu 0x1d80 0x7f>;
+ iommus = <&apps_smmu 0x1d80 0x3f>;
iommu-map = <0x0 &apps_smmu 0x1d80 0x1>,
<0x100 &apps_smmu 0x1d81 0x1>;
assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
assigned-clock-rates = <19200000>;
- iommus = <&apps_smmu 0x1e00 0x7f>;
+ iommus = <&apps_smmu 0x1e00 0x3f>;
iommu-map = <0x0 &apps_smmu 0x1e00 0x1>,
<0x100 &apps_smmu 0x1e01 0x1>;
};
&venus {
- firmware-name = "qcom/sm8250/elish/venus.mbn";
+ firmware-name = "qcom/sm8250/xiaomi/elish/venus.mbn";
status = "okay";
};
power-domains = <&gcc UFS_PHY_GDSC>;
iommus = <&apps_smmu 0xe0 0x0>;
+ dma-coherent;
clock-names =
"core_clk",
<&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&vamacro>;
clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
- assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
assigned-clock-rates = <19200000>, <19200000>;
#clock-cells = <0>;
power-domains = <&gcc UFS_PHY_GDSC>;
iommus = <&apps_smmu 0xe0 0x0>;
+ dma-coherent;
interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a510";
reg = <0 0>;
enable-method = "psci";
next-level-cache = <&L2_0>;
CPU1: cpu@100 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a510";
reg = <0 0x100>;
enable-method = "psci";
next-level-cache = <&L2_100>;
CPU2: cpu@200 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a510";
reg = <0 0x200>;
enable-method = "psci";
next-level-cache = <&L2_200>;
CPU3: cpu@300 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a715";
reg = <0 0x300>;
enable-method = "psci";
next-level-cache = <&L2_300>;
CPU4: cpu@400 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a715";
reg = <0 0x400>;
enable-method = "psci";
next-level-cache = <&L2_400>;
CPU5: cpu@500 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a710";
reg = <0 0x500>;
enable-method = "psci";
next-level-cache = <&L2_500>;
CPU6: cpu@600 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-a710";
reg = <0 0x600>;
enable-method = "psci";
next-level-cache = <&L2_600>;
CPU7: cpu@700 {
device_type = "cpu";
- compatible = "qcom,kryo";
+ compatible = "arm,cortex-x3";
reg = <0 0x700>;
enable-method = "psci";
next-level-cache = <&L2_700>;
required-opps = <&rpmhpd_opp_nom>;
iommus = <&apps_smmu 0x60 0x0>;
+ dma-coherent;
interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
lpass_tlmm: pinctrl@6e80000 {
compatible = "qcom,sm8550-lpass-lpi-pinctrl";
reg = <0 0x06e80000 0 0x20000>,
- <0 0x0725a000 0 0x10000>;
+ <0 0x07250000 0 0x10000>;
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&lpass_tlmm 0 0 23>;
pins = "gpio28", "gpio29";
function = "qup1_se0";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c1_data_clk: qup-i2c1-data-clk-state {
pins = "gpio32", "gpio33";
function = "qup1_se1";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c2_data_clk: qup-i2c2-data-clk-state {
pins = "gpio36", "gpio37";
function = "qup1_se2";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c3_data_clk: qup-i2c3-data-clk-state {
pins = "gpio40", "gpio41";
function = "qup1_se3";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c4_data_clk: qup-i2c4-data-clk-state {
pins = "gpio44", "gpio45";
function = "qup1_se4";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c5_data_clk: qup-i2c5-data-clk-state {
pins = "gpio52", "gpio53";
function = "qup1_se5";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c6_data_clk: qup-i2c6-data-clk-state {
pins = "gpio48", "gpio49";
function = "qup1_se6";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c8_data_clk: qup-i2c8-data-clk-state {
pins = "gpio57";
function = "qup2_se0_l1_mira";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
sda-pins {
pins = "gpio56";
function = "qup2_se0_l0_mira";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
};
pins = "gpio60", "gpio61";
function = "qup2_se1";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c10_data_clk: qup-i2c10-data-clk-state {
pins = "gpio64", "gpio65";
function = "qup2_se2";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c11_data_clk: qup-i2c11-data-clk-state {
pins = "gpio68", "gpio69";
function = "qup2_se3";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c12_data_clk: qup-i2c12-data-clk-state {
pins = "gpio2", "gpio3";
function = "qup2_se4";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c13_data_clk: qup-i2c13-data-clk-state {
pins = "gpio80", "gpio81";
function = "qup2_se5";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_i2c15_data_clk: qup-i2c15-data-clk-state {
pins = "gpio72", "gpio106";
function = "qup2_se7";
drive-strength = <2>;
- bias-pull-up;
+ bias-pull-up = <2200>;
};
qup_spi0_cs: qup-spi0-cs-state {
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
+#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
/* Mandated version of PSCI */
u32 psci_version;
+ /* Protects VM-scoped configuration data */
+ struct mutex config_lock;
+
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
#define KVM_ARCH_FLAG_EL1_32BIT 4
/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5
-
+ /* VM counter offset */
+#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6
+ /* Timer PPIs made immutable */
+#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 7
+ /* SMCCC filter initialized for the VM */
+#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 8
unsigned long flags;
/*
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
+ struct maple_tree smccc_filter;
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
TPIDR_EL2, /* EL2 Software Thread ID Register */
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
+ CNTHP_CTL_EL2,
+ CNTHP_CVAL_EL2,
+ CNTHV_CTL_EL2,
+ CNTHV_CVAL_EL2,
NR_SYS_REGS /* Nothing after this line! */
};
/* vcpu power state */
struct kvm_mp_state mp_state;
+ spinlock_t mp_state_lock;
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
int __init kvm_sys_reg_table_init(void);
+bool lock_all_vcpus(struct kvm *kvm);
+void unlock_all_vcpus(struct kvm *kvm);
+
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
struct kvm_arm_copy_mte_tags *copy_tags);
+int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
+ struct kvm_arm_counter_offset *offset);
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
(system_supports_32bit_el0() && \
!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+#define kvm_vm_has_ran_once(kvm) \
+ (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
+
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
* specific registers encoded in the instructions).
*/
.macro kern_hyp_va reg
+#ifndef __KVM_VHE_HYPERVISOR__
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
and \reg, \reg, #1 /* mask with va_mask */
ror \reg, \reg, #1 /* rotate to the first tag bit */
add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */
ror \reg, \reg, #63 /* rotate back */
alternative_cb_end
+#endif
.endm
/*
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
+#ifndef __KVM_VHE_HYPERVISOR__
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
"ror %0, %0, #1\n"
"add %0, %0, #0\n"
ARM64_ALWAYS_SYSTEM,
kvm_update_va_mask)
: "+r" (v));
+#endif
return v;
}
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
__u64 reserved[2];
};
+/*
+ * Counter/Timer offset structure. Describes the virtual/physical offset.
+ * To be used with KVM_ARM_SET_COUNTER_OFFSET.
+ */
+struct kvm_arm_counter_offset {
+ __u64 counter_offset;
+ __u64 reserved;
+};
+
#define KVM_ARM_TAGS_TO_GUEST 0
#define KVM_ARM_TAGS_FROM_GUEST 1
#endif
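For context, this structure feeds the new KVM_ARM_SET_COUNTER_OFFSET vm ioctl handled by kvm_vm_ioctl_set_counter_offset() later in this series. A minimal userspace sketch, assuming a vm fd from KVM_CREATE_VM on a kernel advertising KVM_CAP_COUNTER_OFFSET (the helper name is hypothetical):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: one offset, applied to both counter views. */
	static int set_counter_offset(int vm_fd, __u64 offset)
	{
		struct kvm_arm_counter_offset off = {
			.counter_offset = offset,
			.reserved = 0,	/* must be zero; the ioctl rejects it otherwise */
		};

		return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
	}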
};
+/* Device Control API on vm fd */
+#define KVM_ARM_VM_SMCCC_CTRL 0
+#define KVM_ARM_VM_SMCCC_FILTER 0
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2
+#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3
#define KVM_ARM_VCPU_PVTIME_CTRL 2
#define KVM_ARM_VCPU_PVTIME_IPA 0
/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
+enum kvm_smccc_filter_action {
+ KVM_SMCCC_FILTER_HANDLE = 0,
+ KVM_SMCCC_FILTER_DENY,
+ KVM_SMCCC_FILTER_FWD_TO_USER,
+
+#ifdef __KERNEL__
+ NR_SMCCC_FILTER_ACTIONS
+#endif
+};
+
+struct kvm_smccc_filter {
+ __u32 base;
+ __u32 nr_functions;
+ __u8 action;
+ __u8 pad[15];
+};
+
+/* arm64-specific KVM_EXIT_HYPERCALL flags */
+#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
+#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
+
#endif
#endif /* __ARM_KVM_H__ */
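These constants are consumed through the generic KVM_SET_DEVICE_ATTR ioctl on the vm fd, whose wiring appears further down in kvm_arch_vm_ioctl(). A hedged sketch of installing one filter range; the base and nr_functions values are purely illustrative, and ranges overlapping an existing entry (including the pre-reserved Arm architecture ranges) are rejected:

	struct kvm_smccc_filter filter = {
		.base = 0xc6000000,	/* illustrative SMC64 function ID */
		.nr_functions = 16,
		.action = KVM_SMCCC_FILTER_FWD_TO_USER,
		/* .pad is implicitly zeroed here; the kernel checks it */
	};
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VM_SMCCC_CTRL,
		.attr = KVM_ARM_VM_SMCCC_FILTER,
		.addr = (__u64)(unsigned long)&filter,
	};

	/* Fails with -EBUSY once the VM has run, or if the range overlaps. */
	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);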
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
+ {
+ .desc = "Enhanced Counter Virtualization (CNTPOFF)",
+ .capability = ARM64_HAS_ECV_CNTPOFF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR0_EL1,
+ .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
+ .field_width = 4,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF,
+ },
#ifdef CONFIG_ARM64_PAN
{
.desc = "Privileged Access Never",
.long .Lefi_header_end - .L_head // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
- .short 0 // DllCharacteristics
+ .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_nested.h>
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
-static const struct kvm_irq_level default_ptimer_irq = {
- .irq = 30,
- .level = 1,
-};
-
-static const struct kvm_irq_level default_vtimer_irq = {
- .irq = 27,
- .level = 1,
+static const u8 default_ppi[] = {
+ [TIMER_PTIMER] = 30,
+ [TIMER_VTIMER] = 27,
+ [TIMER_HPTIMER] = 26,
+ [TIMER_HVTIMER] = 28,
};
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg);
+static bool kvm_arch_timer_get_input_level(int vintid);
+
+static struct irq_ops arch_timer_irq_ops = {
+ .get_input_level = kvm_arch_timer_get_input_level,
+};
+
+static bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
+static int nr_timers(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_has_nv(vcpu))
+ return NR_KVM_EL0_TIMERS;
+
+ return NR_KVM_TIMERS;
+}
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
case TIMER_PTIMER:
return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+ case TIMER_HVTIMER:
+ return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
+ case TIMER_HPTIMER:
+ return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
default:
WARN_ON(1);
return 0;
return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
case TIMER_PTIMER:
return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ case TIMER_HVTIMER:
+ return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
+ case TIMER_HPTIMER:
+ return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
default:
WARN_ON(1);
return 0;
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
+ u64 offset = 0;
+
+ if (!ctxt)
+ return 0;
+
if (ctxt->offset.vm_offset)
- return *ctxt->offset.vm_offset;
+ offset += *ctxt->offset.vm_offset;
+ if (ctxt->offset.vcpu_offset)
+ offset += *ctxt->offset.vcpu_offset;
- return 0;
+ return offset;
}
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
case TIMER_PTIMER:
__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
break;
+ case TIMER_HVTIMER:
+ __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+ break;
+ case TIMER_HPTIMER:
+ __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+ break;
default:
WARN_ON(1);
}
case TIMER_PTIMER:
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
break;
+ case TIMER_HVTIMER:
+ __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+ break;
+ case TIMER_HPTIMER:
+ __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+ break;
default:
WARN_ON(1);
}
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
- if (has_vhe()) {
+ if (vcpu_has_nv(vcpu)) {
+ if (is_hyp_ctxt(vcpu)) {
+ map->direct_vtimer = vcpu_hvtimer(vcpu);
+ map->direct_ptimer = vcpu_hptimer(vcpu);
+ map->emul_vtimer = vcpu_vtimer(vcpu);
+ map->emul_ptimer = vcpu_ptimer(vcpu);
+ } else {
+ map->direct_vtimer = vcpu_vtimer(vcpu);
+ map->direct_ptimer = vcpu_ptimer(vcpu);
+ map->emul_vtimer = vcpu_hvtimer(vcpu);
+ map->emul_ptimer = vcpu_hptimer(vcpu);
+ }
+ } else if (has_vhe()) {
map->direct_vtimer = vcpu_vtimer(vcpu);
map->direct_ptimer = vcpu_ptimer(vcpu);
+ map->emul_vtimer = NULL;
map->emul_ptimer = NULL;
} else {
map->direct_vtimer = vcpu_vtimer(vcpu);
map->direct_ptimer = NULL;
+ map->emul_vtimer = NULL;
map->emul_ptimer = vcpu_ptimer(vcpu);
}
ns = cyclecounter_cyc2ns(timecounter->cc,
val - now,
timecounter->mask,
- &timecounter->frac);
+ &timer_ctx->ns_frac);
return ns;
}
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
- struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+ struct arch_timer_context *ctx;
+
+ ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
+ : vcpu_vtimer(vcpu);
return kvm_counter_compute_delta(ctx, val);
}
u64 min_delta = ULLONG_MAX;
int i;
- for (i = 0; i < NR_KVM_TIMERS; i++) {
+ for (i = 0; i < nr_timers(vcpu); i++) {
struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
WARN(ctx->loaded, "timer %d loaded\n", i);
switch (index) {
case TIMER_VTIMER:
+ case TIMER_HVTIMER:
cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
break;
case TIMER_PTIMER:
+ case TIMER_HPTIMER:
cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
break;
case NR_KVM_TIMERS:
int ret;
timer_ctx->irq.level = new_level;
- trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
+ trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer_ctx->irq.irq,
+ timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
WARN_ON(ret);
kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}
+static void set_cntpoff(u64 cntpoff)
+{
+ if (has_cntpoff())
+ write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
+}
+
static void timer_save_state(struct arch_timer_context *ctx)
{
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
goto out;
switch (index) {
+ u64 cval;
+
case TIMER_VTIMER:
+ case TIMER_HVTIMER:
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
set_cntvoff(0);
break;
case TIMER_PTIMER:
+ case TIMER_HPTIMER:
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
- timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
+ cval = read_sysreg_el0(SYS_CNTP_CVAL);
+
+ if (!has_cntpoff())
+ cval -= timer_get_offset(ctx);
+
+ timer_set_cval(ctx, cval);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTP_CTL);
isb();
+ set_cntpoff(0);
break;
case NR_KVM_TIMERS:
BUG();
*/
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
+ !kvm_timer_irq_can_fire(map.emul_vtimer) &&
!kvm_timer_irq_can_fire(map.emul_ptimer) &&
!vcpu_has_wfit_active(vcpu))
return;
goto out;
switch (index) {
+ u64 cval, offset;
+
case TIMER_VTIMER:
+ case TIMER_HVTIMER:
set_cntvoff(timer_get_offset(ctx));
write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
break;
case TIMER_PTIMER:
- write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
+ case TIMER_HPTIMER:
+ cval = timer_get_cval(ctx);
+ offset = timer_get_offset(ctx);
+ set_cntpoff(offset);
+ if (!has_cntpoff())
+ cval += offset;
+ write_sysreg_el0(cval, SYS_CNTP_CVAL);
isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
break;
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
if (irqchip_in_kernel(vcpu->kvm))
- phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
+ phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
phys_active |= ctx->irq.level;
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
+/* If _pred is true, set bit in _set, otherwise set it in _clr */
+#define assign_clear_set_bit(_pred, _bit, _clr, _set) \
+ do { \
+ if (_pred) \
+ (_set) |= (_bit); \
+ else \
+ (_clr) |= (_bit); \
+ } while (0)
+
+static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
+ struct timer_map *map)
+{
+ int hw, ret;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ /*
+ * We only ever unmap the vtimer irq on a VHE system that runs nested
+ * virtualization, in which case we have valid emul_vtimer,
+ * emul_ptimer, direct_vtimer, and direct_ptimer pointers.
+ *
+ * Since this is called from kvm_timer_vcpu_load(), a change between
+ * vEL2 and vEL1/0 will have just happened, and the timer_map will
+ * represent this, and therefore we switch the emul/direct mappings
+ * below.
+ */
+ hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
+ if (hw < 0) {
+ kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
+ kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));
+
+ ret = kvm_vgic_map_phys_irq(vcpu,
+ map->direct_vtimer->host_timer_irq,
+ timer_irq(map->direct_vtimer),
+ &arch_timer_irq_ops);
+ WARN_ON_ONCE(ret);
+ ret = kvm_vgic_map_phys_irq(vcpu,
+ map->direct_ptimer->host_timer_irq,
+ timer_irq(map->direct_ptimer),
+ &arch_timer_irq_ops);
+ WARN_ON_ONCE(ret);
+
+ /*
+ * The virtual offset behaviour is "interesting", as it
+ * always applies when HCR_EL2.E2H==0, but only when
+ * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
+ * track E2H when putting the HV timer in "direct" mode.
+ */
+ if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
+ struct arch_timer_offset *offs = &map->direct_vtimer->offset;
+
+ if (vcpu_el2_e2h_is_set(vcpu))
+ offs->vcpu_offset = NULL;
+ else
+ offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ }
+ }
+}
+
+static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
+{
+ bool tpt, tpc;
+ u64 clr, set;
+
+ /*
+ * No trapping gets configured here with nVHE. See
+ * __timer_enable_traps(), which is where the equivalent work happens.
+ */
+ if (!has_vhe())
+ return;
+
+ /*
+ * Our default policy is not to trap anything. As we progress
+ * within this function, reality kicks in and we start adding
+ * traps based on emulation requirements.
+ */
+ tpt = tpc = false;
+
+ /*
+ * We have two possibilities to deal with a physical offset:
+ *
+ * - Either we have CNTPOFF (yay!) or the offset is 0:
+ * we let the guest freely access the HW
+ *
+ * - or neither of these conditions applies:
+ * we trap accesses to the HW, but still use it
+ * after correcting the physical offset
+ */
+ if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
+ tpt = tpc = true;
+
+ /*
+ * Apply the enable bits that the guest hypervisor has requested for
+ * its own guest. We can only add traps that wouldn't have been set
+ * above.
+ */
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+ /* Use the VHE format for mental sanity */
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
+
+ tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
+ tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
+ }
+
+ /*
+ * Now that we have collected our requirements, compute the
+ * trap and enable bits.
+ */
+ set = 0;
+ clr = 0;
+
+ assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
+ assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
+
+ /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
+ sysreg_clear_set(cntkctl_el1, clr, set);
+}
+
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
get_timer_map(vcpu, &map);
if (static_branch_likely(&has_gic_active_state)) {
+ if (vcpu_has_nv(vcpu))
+ kvm_timer_vcpu_load_nested_switch(vcpu, &map);
+
kvm_timer_vcpu_load_gic(map.direct_vtimer);
if (map.direct_ptimer)
kvm_timer_vcpu_load_gic(map.direct_ptimer);
timer_restore_state(map.direct_vtimer);
if (map.direct_ptimer)
timer_restore_state(map.direct_ptimer);
-
+ if (map.emul_vtimer)
+ timer_emulate(map.emul_vtimer);
if (map.emul_ptimer)
timer_emulate(map.emul_ptimer);
+
+ timer_set_traps(vcpu, &map);
}
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
+ if (map.emul_vtimer)
+ soft_timer_cancel(&map.emul_vtimer->hrtimer);
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
* resets the timer to be disabled and unmasked and is compliant with
* the ARMv7 architecture.
*/
- timer_set_ctl(vcpu_vtimer(vcpu), 0);
- timer_set_ctl(vcpu_ptimer(vcpu), 0);
+ for (int i = 0; i < nr_timers(vcpu); i++)
+ timer_set_ctl(vcpu_get_timer(vcpu, i), 0);
+
+ /*
+ * A vcpu running at EL2 is in charge of the offset applied to
+ * the virtual timer, so use the physical VM offset, and point
+ * the vcpu offset to CNTVOFF_EL2.
+ */
+ if (vcpu_has_nv(vcpu)) {
+ struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
+
+ offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
+ }
if (timer->enabled) {
- kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
- kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
+ for (int i = 0; i < nr_timers(vcpu); i++)
+ kvm_timer_update_irq(vcpu, false,
+ vcpu_get_timer(vcpu, i));
if (irqchip_in_kernel(vcpu->kvm)) {
- kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
+ kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
if (map.direct_ptimer)
- kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
+ kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
}
}
+ if (map.emul_vtimer)
+ soft_timer_cancel(&map.emul_vtimer->hrtimer);
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
return 0;
}
+static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
+{
+ struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
+ struct kvm *kvm = vcpu->kvm;
+
+ ctxt->vcpu = vcpu;
+
+ if (timerid == TIMER_VTIMER)
+ ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
+ else
+ ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
+
+ hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+ ctxt->hrtimer.function = kvm_hrtimer_expire;
+
+ switch (timerid) {
+ case TIMER_PTIMER:
+ case TIMER_HPTIMER:
+ ctxt->host_timer_irq = host_ptimer_irq;
+ break;
+ case TIMER_VTIMER:
+ case TIMER_HVTIMER:
+ ctxt->host_timer_irq = host_vtimer_irq;
+ break;
+ }
+}
+
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
- vtimer->vcpu = vcpu;
- vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
- ptimer->vcpu = vcpu;
+ for (int i = 0; i < NR_KVM_TIMERS; i++)
+ timer_context_init(vcpu, i);
- /* Synchronize cntvoff across all vtimers of a VM. */
- timer_set_offset(vtimer, kvm_phys_timer_read());
- timer_set_offset(ptimer, 0);
+ /* Synchronize offsets across timers of a VM if not already provided */
+ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
+ timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
+ timer_set_offset(vcpu_ptimer(vcpu), 0);
+ }
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
timer->bg_timer.function = kvm_bg_timer_expire;
+}
- hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
- hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
- vtimer->hrtimer.function = kvm_hrtimer_expire;
- ptimer->hrtimer.function = kvm_hrtimer_expire;
-
- vtimer->irq.irq = default_vtimer_irq.irq;
- ptimer->irq.irq = default_ptimer_irq.irq;
-
- vtimer->host_timer_irq = host_vtimer_irq;
- ptimer->host_timer_irq = host_ptimer_irq;
-
- vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
- ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
+void kvm_timer_init_vm(struct kvm *kvm)
+{
+ for (int i = 0; i < NR_KVM_TIMERS; i++)
+ kvm->arch.timer_data.ppi[i] = default_ppi[i];
}
void kvm_timer_cpu_up(void)
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_TIMER_CNT:
- timer = vcpu_vtimer(vcpu);
- timer_set_offset(timer, kvm_phys_timer_read() - value);
+ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
+ &vcpu->kvm->arch.flags)) {
+ timer = vcpu_vtimer(vcpu);
+ timer_set_offset(timer, kvm_phys_timer_read() - value);
+ }
break;
case KVM_REG_ARM_TIMER_CVAL:
timer = vcpu_vtimer(vcpu);
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
+ case KVM_REG_ARM_PTIMER_CNT:
+ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
+ &vcpu->kvm->arch.flags)) {
+ timer = vcpu_ptimer(vcpu);
+ timer_set_offset(timer, kvm_phys_timer_read() - value);
+ }
+ break;
case KVM_REG_ARM_PTIMER_CVAL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
val = kvm_phys_timer_read() - timer_get_offset(timer);
break;
+ case TIMER_REG_VOFF:
+ val = *timer->offset.vcpu_offset;
+ break;
+
default:
BUG();
}
get_timer_map(vcpu, &map);
timer = vcpu_get_timer(vcpu, tmr);
- if (timer == map.emul_ptimer)
+ if (timer == map.emul_vtimer || timer == map.emul_ptimer)
return kvm_arm_timer_read(vcpu, timer, treg);
preempt_disable();
timer_set_cval(timer, val);
break;
+ case TIMER_REG_VOFF:
+ *timer->offset.vcpu_offset = val;
+ break;
+
default:
BUG();
}
get_timer_map(vcpu, &map);
timer = vcpu_get_timer(vcpu, tmr);
- if (timer == map.emul_ptimer) {
+ if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
soft_timer_cancel(&timer->hrtimer);
kvm_arm_timer_write(vcpu, timer, treg, val);
timer_emulate(timer);
.free = timer_irq_domain_free,
};
-static struct irq_ops arch_timer_irq_ops = {
- .get_input_level = kvm_arch_timer_get_input_level,
-};
-
static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
*flags = irq_get_trigger_type(virq);
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
- int vtimer_irq, ptimer_irq, ret;
- unsigned long i;
+ u32 ppis = 0;
+ bool valid;
- vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
- ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
- if (ret)
- return false;
+ mutex_lock(&vcpu->kvm->arch.config_lock);
- ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
- ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
- if (ret)
- return false;
+ for (int i = 0; i < nr_timers(vcpu); i++) {
+ struct arch_timer_context *ctx;
+ int irq;
+
+ ctx = vcpu_get_timer(vcpu, i);
+ irq = timer_irq(ctx);
+ if (kvm_vgic_set_owner(vcpu, irq, ctx))
+ break;
- kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
- if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
- vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
- return false;
+ /*
+ * We know by construction that we only have PPIs, so
+ * all values are less than 32.
+ */
+ ppis |= BIT(irq);
}
- return true;
+ valid = hweight32(ppis) == nr_timers(vcpu);
+
+ if (valid)
+ set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);
+
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+
+ return valid;
}
-bool kvm_arch_timer_get_input_level(int vintid)
+static bool kvm_arch_timer_get_input_level(int vintid)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- struct arch_timer_context *timer;
if (WARN(!vcpu, "No vcpu context!\n"))
return false;
- if (vintid == vcpu_vtimer(vcpu)->irq.irq)
- timer = vcpu_vtimer(vcpu);
- else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
- timer = vcpu_ptimer(vcpu);
- else
- BUG();
+ for (int i = 0; i < nr_timers(vcpu); i++) {
+ struct arch_timer_context *ctx;
+
+ ctx = vcpu_get_timer(vcpu, i);
+ if (timer_irq(ctx) == vintid)
+ return kvm_timer_should_fire(ctx);
+ }
+
+ /* A timer IRQ has fired, but no matching timer was found? */
+ WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);
- return kvm_timer_should_fire(timer);
+ return false;
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
- map.direct_vtimer->irq.irq,
+ timer_irq(map.direct_vtimer),
&arch_timer_irq_ops);
if (ret)
return ret;
if (map.direct_ptimer) {
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
- map.direct_ptimer->irq.irq,
+ timer_irq(map.direct_ptimer),
&arch_timer_irq_ops);
}
return 0;
}
-/*
- * On VHE system, we only need to configure the EL2 timer trap register once,
- * not for every world switch.
- * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
- * and this makes those bits have no effect for the host kernel execution.
- */
+/* If we have CNTPOFF, permanently set ECV to enable it */
void kvm_timer_init_vhe(void)
{
- /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
- u32 cnthctl_shift = 10;
- u64 val;
-
- /*
- * VHE systems allow the guest direct access to the EL1 physical
- * timer/counter.
- */
- val = read_sysreg(cnthctl_el2);
- val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
- val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
- write_sysreg(val, cnthctl_el2);
-}
-
-static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
-{
- struct kvm_vcpu *vcpu;
- unsigned long i;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
- vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
- }
+ if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
+ sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
}
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
int __user *uaddr = (int __user *)(long)attr->addr;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
- int irq;
+ int irq, idx, ret = 0;
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
if (!(irq_is_ppi(irq)))
return -EINVAL;
- if (vcpu->arch.timer_cpu.enabled)
- return -EBUSY;
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+
+ if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
+ &vcpu->kvm->arch.flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
- set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
+ idx = TIMER_VTIMER;
break;
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
- set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
+ idx = TIMER_PTIMER;
+ break;
+ case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+ idx = TIMER_HVTIMER;
+ break;
+ case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
+ idx = TIMER_HPTIMER;
break;
default:
- return -ENXIO;
+ ret = -ENXIO;
+ goto out;
}
- return 0;
+ /*
+ * We cannot validate the IRQ uniqueness before the VM runs, so take
+ * it at face value. The verdict is given on the first run of each
+ * vcpu. Yes, this is late. Blame it on the stupid API.
+ */
+ vcpu->kvm->arch.timer_data.ppi[idx] = irq;
+
+out:
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+ return ret;
}
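With the two hyp timer attributes in place, userspace can route all four timer PPIs through the existing vcpu device attribute path. A sketch under the assumption of a valid vcpu fd; the chosen IRQ number is illustrative and must be a PPI:

	int irq = 28;	/* illustrative PPI for the hyp virtual timer */
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_TIMER_CTRL,
		.attr = KVM_ARM_VCPU_TIMER_IRQ_HVTIMER,
		.addr = (__u64)(unsigned long)&irq,
	};

	/* Fails with -EBUSY once the PPIs have been made immutable. */
	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);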
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
timer = vcpu_ptimer(vcpu);
break;
+ case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+ timer = vcpu_hvtimer(vcpu);
+ break;
+ case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
+ timer = vcpu_hptimer(vcpu);
+ break;
default:
return -ENXIO;
}
- irq = timer->irq.irq;
+ irq = timer_irq(timer);
return put_user(irq, uaddr);
}
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
+ case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+ case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
return 0;
}
return -ENXIO;
}
+
+int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
+ struct kvm_arm_counter_offset *offset)
+{
+ int ret = 0;
+
+ if (offset->reserved)
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ if (lock_all_vcpus(kvm)) {
+ set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
+
+ /*
+ * If userspace decides to set the offset using this
+ * API rather than merely restoring the counter
+ * values, the offset applies to both the virtual and
+ * physical views.
+ */
+ kvm->arch.timer_data.voffset = offset->counter_offset;
+ kvm->arch.timer_data.poffset = offset->counter_offset;
+
+ unlock_all_vcpus(kvm);
+ } else {
+ ret = -EBUSY;
+ }
+
+ mutex_unlock(&kvm->lock);
+
+ return ret;
+}
{
int ret;
+ mutex_init(&kvm->arch.config_lock);
+
+#ifdef CONFIG_LOCKDEP
+ /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
+ mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
+ mutex_unlock(&kvm->arch.config_lock);
+ mutex_unlock(&kvm->lock);
+#endif
+
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
kvm_vgic_early_init(kvm);
+ kvm_timer_init_vm(kvm);
+
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->max_vcpus = kvm_arm_default_max_vcpus();
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
+
+ kvm_arm_teardown_hypercalls(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_VCPU_ATTRIBUTES:
case KVM_CAP_PTP_KVM:
case KVM_CAP_ARM_SYSTEM_SUSPEND:
+ case KVM_CAP_COUNTER_OFFSET:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
{
int err;
+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
+#ifdef CONFIG_LOCKDEP
+ /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
+ mutex_lock(&vcpu->mutex);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+ mutex_unlock(&vcpu->mutex);
+#endif
+
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu->cpu = -1;
}
-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
+void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_arm_vcpu_power_off(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}
static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
kvm_make_request(KVM_REQ_SUSPEND, vcpu);
kvm_vcpu_kick(vcpu);
}
static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- *mp_state = vcpu->arch.mp_state;
+ *mp_state = READ_ONCE(vcpu->arch.mp_state);
return 0;
}
{
int ret = 0;
+ spin_lock(&vcpu->arch.mp_state_lock);
+
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.mp_state = *mp_state;
+ WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
- kvm_arm_vcpu_power_off(vcpu);
+ __kvm_arm_vcpu_power_off(vcpu);
break;
case KVM_MP_STATE_SUSPENDED:
kvm_arm_vcpu_suspend(vcpu);
ret = -EINVAL;
}
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
return ret;
}
if (kvm_vm_is_protected(kvm))
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}
/*
* Handle the "start in power-off" case.
*/
+ spin_lock(&vcpu->arch.mp_state_lock);
+
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- kvm_arm_vcpu_power_off(vcpu);
+ __kvm_arm_vcpu_power_off(vcpu);
else
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+
+ spin_unlock(&vcpu->arch.mp_state_lock);
return 0;
}
}
}
+static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_ARM_VM_SMCCC_CTRL:
+ return kvm_vm_smccc_has_attr(kvm, attr);
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_ARM_VM_SMCCC_CTRL:
+ return kvm_vm_smccc_set_attr(kvm, attr);
+ default:
+ return -ENXIO;
+ }
+}
+
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
+ struct kvm_device_attr attr;
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
return -EFAULT;
return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
}
+ case KVM_ARM_SET_COUNTER_OFFSET: {
+ struct kvm_arm_counter_offset offset;
+
+ if (copy_from_user(&offset, argp, sizeof(offset)))
+ return -EFAULT;
+ return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
+ }
+ case KVM_HAS_DEVICE_ATTR: {
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+
+ return kvm_vm_has_attr(kvm, &attr);
+ }
+ case KVM_SET_DEVICE_ATTR: {
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+
+ return kvm_vm_set_attr(kvm, &attr);
+ }
default:
return -EINVAL;
}
}
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+ struct kvm_vcpu *tmp_vcpu;
+
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+ mutex_unlock(&tmp_vcpu->mutex);
+ }
+}
+
+void unlock_all_vcpus(struct kvm *kvm)
+{
+ lockdep_assert_held(&kvm->lock);
+
+ unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+bool lock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *tmp_vcpu;
+ unsigned long c;
+
+ lockdep_assert_held(&kvm->lock);
+
+ /*
+ * Any time a vcpu is in an ioctl (including running), the
+ * core KVM code tries to grab the vcpu->mutex.
+ *
+ * By grabbing the vcpu->mutex of all VCPUs we ensure that no
+ * other VCPUs can fiddle with the state while we access it.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+ if (!mutex_trylock(&tmp_vcpu->mutex)) {
+ unlock_vcpus(kvm, c - 1);
+ return false;
+ }
+ }
+
+ return true;
+}
+
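The intended calling pattern mirrors kvm_vm_ioctl_set_counter_offset() above: take kvm->lock first, then try to stop every vcpu before touching VM-scoped state. Sketched:

	mutex_lock(&kvm->lock);
	if (lock_all_vcpus(kvm)) {
		/* No vcpu can enter KVM_RUN or any other vcpu ioctl here,
		 * so VM-scoped state can be mutated safely. */
		unlock_all_vcpus(kvm);
	} else {
		/* At least one vcpu was busy in an ioctl: back off. */
	}
	mutex_unlock(&kvm->lock);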
static unsigned long nvhe_percpu_size(void)
{
return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
return copy_core_reg_indices(vcpu, NULL);
}
-/**
- * ARM64 versions of the TIMER registers, always available on arm64
- */
+static const u64 timer_reg_list[] = {
+ KVM_REG_ARM_TIMER_CTL,
+ KVM_REG_ARM_TIMER_CNT,
+ KVM_REG_ARM_TIMER_CVAL,
+ KVM_REG_ARM_PTIMER_CTL,
+ KVM_REG_ARM_PTIMER_CNT,
+ KVM_REG_ARM_PTIMER_CVAL,
+};
-#define NUM_TIMER_REGS 3
+#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
static bool is_timer_reg(u64 index)
{
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
+ case KVM_REG_ARM_PTIMER_CTL:
+ case KVM_REG_ARM_PTIMER_CNT:
+ case KVM_REG_ARM_PTIMER_CVAL:
return true;
}
return false;
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
- return -EFAULT;
+ for (int i = 0; i < NUM_TIMER_REGS; i++) {
+ if (put_user(timer_reg_list[i], uindices))
+ return -EFAULT;
+ uindices++;
+ }
return 0;
}
switch (attr->group) {
case KVM_ARM_VCPU_PMU_V3_CTRL:
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
static int handle_hvc(struct kvm_vcpu *vcpu)
{
- int ret;
-
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
return 1;
}
- ret = kvm_hvc_call_handler(vcpu);
- if (ret < 0) {
- vcpu_set_reg(vcpu, 0, ~0UL);
- return 1;
- }
-
- return ret;
+ return kvm_smccc_call_handler(vcpu);
}
static int handle_smc(struct kvm_vcpu *vcpu)
{
- int ret;
-
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
*
* We need to advance the PC after the trap, as it would
- * otherwise return to the same address...
- *
- * Only handle SMCs from the virtual EL2 with an immediate of zero and
- * skip it otherwise.
+ * otherwise return to the same address. Furthermore, pre-incrementing
+ * the PC before potentially exiting to userspace maintains the same
+ * abstraction for both SMCs and HVCs.
+ */
+ kvm_incr_pc(vcpu);
+
+ /*
+ * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
+ * "SMC and HVC immediate value".
*/
- if (!vcpu_is_el2(vcpu) || kvm_vcpu_hvc_get_imm(vcpu)) {
+ if (kvm_vcpu_hvc_get_imm(vcpu)) {
vcpu_set_reg(vcpu, 0, ~0UL);
- kvm_incr_pc(vcpu);
return 1;
}
* at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
* being treated as UNDEFINED.
*/
- ret = kvm_hvc_call_handler(vcpu);
- if (ret < 0)
- vcpu_set_reg(vcpu, 0, ~0UL);
-
- kvm_incr_pc(vcpu);
-
- return ret;
+ return kvm_smccc_call_handler(vcpu);
}
/*
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
return true;
}
+static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *ctxt;
+ u32 sysreg;
+ u64 val;
+
+ /*
+ * We only get here for 64bit guests; 32bit guests will hit
+ * the long and winding road all the way to the standard
+ * handling. Yes, it sucks to be irrelevant.
+ */
+ sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+
+ switch (sysreg) {
+ case SYS_CNTPCT_EL0:
+ case SYS_CNTPCTSS_EL0:
+ if (vcpu_has_nv(vcpu)) {
+ if (is_hyp_ctxt(vcpu)) {
+ ctxt = vcpu_hptimer(vcpu);
+ break;
+ }
+
+ /* Check for guest hypervisor trapping */
+ val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val = (val & CNTHCTL_EL1PCTEN) << 10;
+
+ if (!(val & (CNTHCTL_EL1PCTEN << 10)))
+ return false;
+ }
+
+ ctxt = vcpu_ptimer(vcpu);
+ break;
+ default:
+ return false;
+ }
+
+ val = arch_timer_read_cntpct_el0();
+
+ if (ctxt->offset.vm_offset)
+ val -= *kern_hyp_va(ctxt->offset.vm_offset);
+ if (ctxt->offset.vcpu_offset)
+ val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
+
+ vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return kvm_hyp_handle_ptrauth(vcpu, exit_code);
+ if (kvm_hyp_handle_cntpct(vcpu))
+ return true;
+
return false;
}
/* Now drain all buffered data to memory */
psb_csync();
- dsb(nsh);
}
static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Drain the trace buffer to memory */
tsb_csync();
- dsb(nsh);
}
static void __debug_restore_trace(u64 trfcr_el1)
params->vttbr = kvm_get_vttbr(mmu);
params->vtcr = host_mmu.arch.vtcr;
params->hcr_el2 |= HCR_VM;
+
+ /*
+ * The CMO below not only cleans the updated params to the
+ * PoC, but also provides the DSB that ensures ongoing
+ * page-table walks that have started before we trapped to EL2
+ * have completed.
+ */
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg(params->hcr_el2, hcr_el2);
*/
__debug_save_host_buffers_nvhe(vcpu);
+ /*
+ * We're about to restore some new MMU state. Make sure
+ * ongoing page-table walks that have started before we
+ * trapped to EL2 have completed. This also synchronises the
+ * above disabling of SPE and TRBE.
+ *
+ * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
+ * rule R_LFHQG and subsequent information statements.
+ */
+ dsb(nsh);
+
__kvm_adjust_pc(vcpu);
/*
__timer_disable_traps(vcpu);
__hyp_vgic_save_state(vcpu);
+ /*
+ * Same thing as before the guest run: we're about to switch
+ * the MMU context, so let's make sure we don't have any
+ * ongoing EL1&0 translations.
+ */
+ dsb(nsh);
+
__deactivate_traps(vcpu);
__load_host_stage2();
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
void __kvm_timer_set_cntvoff(u64 cntvoff)
{
*/
void __timer_enable_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 clr = 0, set = 0;
/*
* Disallow physical timer access for the guest
- * Physical counter access is allowed
+ * Physical counter access is allowed if no offset is enforced
+ * or running protected (we don't offset anything in this case).
*/
- val = read_sysreg(cnthctl_el2);
- val &= ~CNTHCTL_EL1PCEN;
- val |= CNTHCTL_EL1PCTEN;
- write_sysreg(val, cnthctl_el2);
+ clr = CNTHCTL_EL1PCEN;
+ if (is_protected_kvm_enabled() ||
+ !kern_hyp_va(vcpu->kvm)->arch.timer_data.poffset)
+ set |= CNTHCTL_EL1PCTEN;
+ else
+ clr |= CNTHCTL_EL1PCTEN;
+
+ sysreg_clear_set(cnthctl_el2, clr, set);
}
};
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
- struct tlb_inv_context *cxt)
+ struct tlb_inv_context *cxt,
+ bool nsh)
{
+ /*
+ * We have two requirements:
+ *
+ * - ensure that the page table updates are visible to all
+ * CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
+ * being either ish or nsh, depending on the invalidation
+ * type.
+ *
+ * - complete any speculative page table walk started before
+ * we trapped to EL2 so that we can mess with the MM
+ * registers out of context, for which dsb(nsh) is enough
+ *
+ * The composition of these two barriers is a dsb(DOMAIN), and
+ * the 'nsh' parameter tracks the distinction between
+ * Inner-Shareable and Non-Shareable, as specified by the
+ * callers.
+ */
+ if (nsh)
+ dsb(nsh);
+ else
+ dsb(ish);
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
{
struct tlb_inv_context cxt;
- dsb(ishst);
-
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
/*
* We could do so much better if we had the VA as well.
{
struct tlb_inv_context cxt;
- dsb(ishst);
-
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
__tlbi(vmalls12e1is);
dsb(ish);
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
__tlbi(vmalle1);
asm volatile("ic iallu");
void __kvm_flush_vm_context(void)
{
- dsb(ishst);
+ /* Same remark as in __tlb_switch_to_guest() */
+ dsb(ish);
__tlbi(alle1is);
/*
/*
* When we exit from the guest we change a number of CPU configuration
- * parameters, such as traps. Make sure these changes take effect
- * before running the host or additional guests.
+ * parameters, such as traps. We rely on the isb() in kvm_call_hyp*()
+ * to make sure these changes take effect before running the host or
+ * additional guests.
*/
- isb();
-
return ret;
}
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_nested.h>
/*
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
__sysreg_save_user_state(host_ctxt);
/*
+ * When running a normal EL1 guest, we only load a new vcpu
+ * after a context switch, which involves a DSB, so all
+ * speculative EL1&0 walks will have already completed.
+ * If running NV, the vcpu may transition between vEL1 and
+ * vEL2 without a context switch, so make sure we complete
+ * those walks before loading a new context.
+ */
+ if (vcpu_has_nv(vcpu))
+ dsb(nsh);
+
+ /*
* Load guest EL1 and user state
*
* We must restore the 32-bit state before the sysregs, thanks
cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
break;
case KVM_PTP_PHYS_COUNTER:
- cycles = systime_snapshot.cycles;
+ cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
break;
default:
return;
val[3] = lower_32_bits(cycles);
}
-static bool kvm_hvc_call_default_allowed(u32 func_id)
+static bool kvm_smccc_default_allowed(u32 func_id)
{
switch (func_id) {
/*
}
}
-static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
+static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
&smccc_feat->vendor_hyp_bmap);
default:
- return kvm_hvc_call_default_allowed(func_id);
+ return false;
+ }
+}
+
+#define SMC32_ARCH_RANGE_BEGIN ARM_SMCCC_VERSION_FUNC_ID
+#define SMC32_ARCH_RANGE_END ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, ARM_SMCCC_FUNC_MASK)
+
+#define SMC64_ARCH_RANGE_BEGIN ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ 0, 0)
+#define SMC64_ARCH_RANGE_END ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ 0, ARM_SMCCC_FUNC_MASK)
+
+static void init_smccc_filter(struct kvm *kvm)
+{
+ int r;
+
+ mt_init(&kvm->arch.smccc_filter);
+
+ /*
+ * Prevent userspace from handling any SMCCC calls in the architecture
+ * range, avoiding the risk of misrepresenting Spectre mitigation status
+ * to the guest.
+ */
+ r = mtree_insert_range(&kvm->arch.smccc_filter,
+ SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
+ xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+ GFP_KERNEL_ACCOUNT);
+ WARN_ON_ONCE(r);
+
+ r = mtree_insert_range(&kvm->arch.smccc_filter,
+ SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
+ xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+ GFP_KERNEL_ACCOUNT);
+ WARN_ON_ONCE(r);
+
+}
+
+static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
+{
+ const void *zero_page = page_to_virt(ZERO_PAGE(0));
+ struct kvm_smccc_filter filter;
+ u32 start, end;
+ int r;
+
+ if (copy_from_user(&filter, uaddr, sizeof(filter)))
+ return -EFAULT;
+
+ if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
+ return -EINVAL;
+
+ start = filter.base;
+ end = start + filter.nr_functions - 1;
+
+ if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
+ return -EINVAL;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (kvm_vm_has_ran_once(kvm)) {
+ r = -EBUSY;
+ goto out_unlock;
}
+
+ r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
+ xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
+ if (r)
+ goto out_unlock;
+
+ set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags);
+
+out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
+ return r;
+}
+
+static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
+{
+ unsigned long idx = func_id;
+ void *val;
+
+ if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags))
+ return KVM_SMCCC_FILTER_HANDLE;
+
+ /*
+ * But where's the error handling, you say?
+ *
+ * mt_find() returns NULL if no entry was found, which just so happens
+ * to match KVM_SMCCC_FILTER_HANDLE.
+ */
+ val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
+ return xa_to_value(val);
}
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
+{
+ /*
+ * Intervening actions in the SMCCC filter take precedence over the
+ * pseudo-firmware register bitmaps.
+ */
+ u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
+ if (action != KVM_SMCCC_FILTER_HANDLE)
+ return action;
+
+ if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
+ kvm_smccc_default_allowed(func_id))
+ return KVM_SMCCC_FILTER_HANDLE;
+
+ return KVM_SMCCC_FILTER_DENY;
+}
+
+static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
+{
+ u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+ struct kvm_run *run = vcpu->run;
+ u64 flags = 0;
+
+ if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
+ flags |= KVM_HYPERCALL_EXIT_SMC;
+
+ if (!kvm_vcpu_trap_il_is32bit(vcpu))
+ flags |= KVM_HYPERCALL_EXIT_16BIT;
+
+ run->exit_reason = KVM_EXIT_HYPERCALL;
+ run->hypercall = (typeof(run->hypercall)) {
+ .nr = func_id,
+ .flags = flags,
+ };
+}
+
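On the other side of this exit, a VMM sees KVM_EXIT_HYPERCALL with the fields filled in above. A hedged userspace sketch; the handler name and the register write-back are assumptions, not part of this series:

	/* Hypothetical VMM-side handler; 'run' is the mmap'd vcpu run
	 * structure obtained after KVM_CREATE_VCPU. */
	static void handle_hypercall_exit(struct kvm_run *run)
	{
		if (run->exit_reason != KVM_EXIT_HYPERCALL)
			return;

		/* run->hypercall.nr holds the SMCCC function ID; the flags
		 * say whether the conduit was SMC rather than HVC, and
		 * whether the trapped instruction used a 16-bit encoding. */
		if (run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC)
			; /* call arrived via SMC */

		/* Emulate the call, then write x0..x3 back (e.g. with
		 * KVM_SET_ONE_REG) before the next KVM_RUN. */
	}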
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
u32 func_id = smccc_get_function(vcpu);
u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
u32 feature;
+ u8 action;
gpa_t gpa;
- if (!kvm_hvc_call_allowed(vcpu, func_id))
+ action = kvm_smccc_get_action(vcpu, func_id);
+ switch (action) {
+ case KVM_SMCCC_FILTER_HANDLE:
+ break;
+ case KVM_SMCCC_FILTER_DENY:
+ goto out;
+ case KVM_SMCCC_FILTER_FWD_TO_USER:
+ kvm_prepare_hypercall_exit(vcpu, func_id);
+ return 0;
+ default:
+ WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
goto out;
+ }
switch (func_id) {
case ARM_SMCCC_VERSION_FUNC_ID:
smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
+
+ init_smccc_filter(kvm);
+}
+
+void kvm_arm_teardown_hypercalls(struct kvm *kvm)
+{
+ mtree_destroy(&kvm->arch.smccc_filter);
}
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
if (val & ~fw_reg_features)
return -EINVAL;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
- if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
- val != *fw_reg_bmap) {
+ if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
ret = -EBUSY;
goto out;
}
WRITE_ONCE(*fw_reg_bmap, val);
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}
return -EINVAL;
}
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VM_SMCCC_FILTER:
+ return 0;
+ default:
+ return -ENXIO;
+ }
+}
+
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ void __user *uaddr = (void __user *)attr->addr;
+
+ switch (attr->attr) {
+ case KVM_ARM_VM_SMCCC_FILTER:
+ return kvm_smccc_set_filter(kvm, uaddr);
+ default:
+ return -ENXIO;
+ }
+}
struct arm_pmu *arm_pmu;
int ret = -ENXIO;
- mutex_lock(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.config_lock);
mutex_lock(&arm_pmus_lock);
list_for_each_entry(entry, &arm_pmus, entry) {
arm_pmu = entry->arm_pmu;
if (arm_pmu->pmu.type == pmu_id) {
- if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
+ if (kvm_vm_has_ran_once(kvm) ||
(kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
ret = -EBUSY;
break;
}
mutex_unlock(&arm_pmus_lock);
- mutex_unlock(&kvm->lock);
return ret;
}
{
struct kvm *kvm = vcpu->kvm;
+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV;
if (vcpu->arch.pmu.created)
return -EBUSY;
- mutex_lock(&kvm->lock);
if (!kvm->arch.arm_pmu) {
/* No PMU set, get the default one */
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
- if (!kvm->arch.arm_pmu) {
- mutex_unlock(&kvm->lock);
+ if (!kvm->arch.arm_pmu)
return -ENODEV;
- }
}
- mutex_unlock(&kvm->lock);
switch (attr->attr) {
case KVM_ARM_VCPU_PMU_V3_IRQ: {
filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL;
- mutex_lock(&kvm->lock);
-
- if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
- mutex_unlock(&kvm->lock);
+ if (kvm_vm_has_ran_once(kvm))
return -EBUSY;
- }
if (!kvm->arch.pmu_filter) {
kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
- if (!kvm->arch.pmu_filter) {
- mutex_unlock(&kvm->lock);
+ if (!kvm->arch.pmu_filter)
return -ENOMEM;
- }
/*
* The default depends on the first applied filter.
else
bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
- mutex_unlock(&kvm->lock);
-
return 0;
}
case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
+ int ret = PSCI_RET_SUCCESS;
unsigned long cpu_id;
cpu_id = smccc_get_arg1(source_vcpu);
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
+
+ spin_lock(&vcpu->arch.mp_state_lock);
if (!kvm_arm_vcpu_stopped(vcpu)) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
- return PSCI_RET_ALREADY_ON;
+ ret = PSCI_RET_ALREADY_ON;
else
- return PSCI_RET_INVALID_PARAMS;
+ ret = PSCI_RET_INVALID_PARAMS;
+
+ goto out_unlock;
}
reset_state = &vcpu->arch.reset_state;
*/
reset_state->r0 = smccc_get_arg3(source_vcpu);
- WRITE_ONCE(reset_state->reset, true);
+ reset_state->reset = true;
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
/*
*/
smp_wmb();
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
kvm_vcpu_wake_up(vcpu);
- return PSCI_RET_SUCCESS;
+out_unlock:
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ return ret;
}
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
- kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ spin_lock(&tmp->arch.mp_state_lock);
+ WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&tmp->arch.mp_state_lock);
+ }
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
kvm_psci_narrow_to_32bit(vcpu);
fallthrough;
case PSCI_0_2_FN64_CPU_ON:
- mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
kvm_psci_narrow_to_32bit(vcpu);
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
- mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
u32 psci_fn = smccc_get_function(vcpu);
+ int version = kvm_psci_version(vcpu);
unsigned long val;
val = kvm_psci_check_allowed_function(vcpu, psci_fn);
return 1;
}
- switch (kvm_psci_version(vcpu)) {
+ switch (version) {
case KVM_ARM_PSCI_1_1:
return kvm_psci_1_x_call(vcpu, 1);
case KVM_ARM_PSCI_1_0:
case KVM_ARM_PSCI_0_1:
return kvm_psci_0_1_call(vcpu);
default:
- return -EINVAL;
+ WARN_ONCE(1, "Unknown PSCI version %d", version);
+ smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
+ return 1;
}
}
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
/*
bool loaded;
u32 pstate;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_set_vm_width(vcpu);
- if (!ret) {
- reset_state = vcpu->arch.reset_state;
- WRITE_ONCE(vcpu->arch.reset_state.reset, false);
- }
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
if (ret)
return ret;
+ spin_lock(&vcpu->arch.mp_state_lock);
+ reset_state = vcpu->arch.reset_state;
+ vcpu->arch.reset_state.reset = false;
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);
tmr = TIMER_PTIMER;
treg = TIMER_REG_CVAL;
break;
+ case SYS_CNTPCT_EL0:
+ case SYS_CNTPCTSS_EL0:
+ case SYS_AARCH32_CNTPCT:
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CNT;
+ break;
default:
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
kvm_inject_undefined(vcpu);
AMU_AMEVTYPER1_EL0(14),
AMU_AMEVTYPER1_EL0(15),
+ { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
+ { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
+ { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
__field( unsigned long, vcpu_id )
__field( int, direct_vtimer )
__field( int, direct_ptimer )
+ __field( int, emul_vtimer )
__field( int, emul_ptimer )
),
__entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
__entry->direct_ptimer =
(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
+ __entry->emul_vtimer =
+ (map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1;
__entry->emul_ptimer =
(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
),
- TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
+ TP_printk("VCPU: %ld, dv: %d, dp: %d, ev: %d, ep: %d",
__entry->vcpu_id,
__entry->direct_vtimer,
__entry->direct_ptimer,
+ __entry->emul_vtimer,
__entry->emul_ptimer)
);
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
if (iter) {
iter = ERR_PTR(-EBUSY);
if (end_of_vgic(iter))
iter = NULL;
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return iter;
}
if (IS_ERR(v))
return;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
kfree(iter->lpi_array);
kfree(iter);
kvm->arch.vgic.iter = NULL;
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
unsigned long i;
int ret;
- if (irqchip_in_kernel(kvm))
- return -EEXIST;
-
/*
* This function is also called by the KVM_CREATE_IRQCHIP handler,
* which had no chance yet to check the availability of the GICv2
!kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV;
+ /* Must be held to avoid race with vCPU creation */
+ lockdep_assert_held(&kvm->lock);
+
ret = -EBUSY;
if (!lock_all_vcpus(kvm))
return ret;
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (irqchip_in_kernel(kvm)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_has_run_once(vcpu))
goto out_unlock;
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
return ret;
}
* KVM io device for the redistributor that belongs to this VCPU.
*/
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = vgic_register_redist_iodev(vcpu);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}
return ret;
}
* The function is generally called when nr_spis has been explicitly set
 * by the guest through the KVM DEVICE API. If not, nr_spis is set to 256.
* vgic_initialized() returns true when this function has succeeded.
- * Must be called with kvm->lock held!
*/
int vgic_init(struct kvm *kvm)
{
int ret = 0, i;
unsigned long idx;
+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (vgic_initialized(kvm))
return 0;
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
-/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
+ lockdep_assert_held(&kvm->arch.config_lock);
+
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
void kvm_vgic_destroy(struct kvm *kvm)
{
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}
/**
if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
return -EBUSY;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}
return ret;
if (likely(vgic_ready(kvm)))
return 0;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
goto out;
dist->ready = true;
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);
+ /* Yep, even more trickery for lock ordering... */
+#ifdef CONFIG_LOCKDEP
+ mutex_lock(&dev->kvm->arch.config_lock);
+ mutex_lock(&its->cmd_lock);
+ mutex_lock(&its->its_lock);
+ mutex_unlock(&its->its_lock);
+ mutex_unlock(&its->cmd_lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);
+#endif
+
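The CONFIG_LOCKDEP block above is a priming idiom: taking the three mutexes once, in the documented order, while the ITS is created records the legal nesting with lockdep, so a later inversion is reported even if the racy path is never exercised in testing. The same idiom in isolation, as a generic sketch with hypothetical locks:

	static DEFINE_MUTEX(outer);
	static DEFINE_MUTEX(inner);

	static void prime_lockdep_order(void)
	{
		/* Teach lockdep that 'outer' may nest around 'inner'. */
		mutex_lock(&outer);
		mutex_lock(&inner);
		mutex_unlock(&inner);
		mutex_unlock(&outer);
	}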
its->vgic_its_base = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&its->device_list);
mutex_lock(&dev->kvm->lock);
+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
+ }
+
+ mutex_lock(&dev->kvm->arch.config_lock);
+
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
ret = -ENXIO;
goto out;
goto out;
}
- if (!lock_all_vcpus(dev->kvm)) {
- ret = -EBUSY;
- goto out;
- }
-
addr = its->vgic_its_base + offset;
len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
} else {
*reg = region->its_read(dev->kvm, its, addr, len);
}
- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
return 0;
mutex_lock(&kvm->lock);
- mutex_lock(&its->its_lock);
if (!lock_all_vcpus(kvm)) {
- mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->lock);
return -EBUSY;
}
+ mutex_lock(&kvm->arch.config_lock);
+ mutex_lock(&its->its_lock);
+
switch (attr) {
case KVM_DEV_ARM_ITS_CTRL_RESET:
vgic_its_reset(kvm, its);
break;
}
- unlock_all_vcpus(kvm);
mutex_unlock(&its->its_lock);
+ mutex_unlock(&kvm->arch.config_lock);
+ unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}
struct vgic_dist *vgic = &kvm->arch.vgic;
int r;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
r = -ENODEV;
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return r;
}
if (get_user(addr, uaddr))
return -EFAULT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
if (!r && !write)
r = put_user(addr, uaddr);
(val & 31))
return -EINVAL;
- mutex_lock(&dev->kvm->lock);
+ mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
ret = -EBUSY;
dev->kvm->arch.vgic.nr_spis =
val - VGIC_NR_PRIVATE_IRQS;
- mutex_unlock(&dev->kvm->lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
case KVM_DEV_ARM_VGIC_GRP_CTRL: {
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
- mutex_lock(&dev->kvm->lock);
+ mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_init(dev->kvm);
- mutex_unlock(&dev->kvm->lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);
return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/*
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
+
+ mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
+ mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
return 0;
}
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
- struct kvm_vcpu *tmp_vcpu;
-
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&tmp_vcpu->mutex);
- }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
- unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
- struct kvm_vcpu *tmp_vcpu;
- unsigned long c;
-
- /*
- * Any time a vcpu is run, vcpu_load is called which tries to grab the
- * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
- * that no other VCPUs are run and fiddle with the vgic state while we
- * access it.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
- if (!mutex_trylock(&tmp_vcpu->mutex)) {
- unlock_vcpus(kvm, c - 1);
- return false;
- }
- }
-
- return true;
-}
-
/**
* vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
*
mutex_lock(&dev->kvm->lock);
+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
+ }
+
+ mutex_lock(&dev->kvm->arch.config_lock);
+
ret = vgic_init(dev->kvm);
if (ret)
goto out;
- if (!lock_all_vcpus(dev->kvm)) {
- ret = -EBUSY;
- goto out;
- }
-
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
break;
}
- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
mutex_lock(&dev->kvm->lock);
- if (unlikely(!vgic_initialized(dev->kvm))) {
- ret = -EBUSY;
- goto out;
+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
}
- if (!lock_all_vcpus(dev->kvm)) {
+ mutex_lock(&dev->kvm->arch.config_lock);
+
+ if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
}
break;
}
- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {
case GICD_CTLR: {
bool was_enabled, is_hwsgi;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
was_enabled = dist->enabled;
is_hwsgi = dist->nassgireq;
else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
}
case GICD_TYPER:
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 val;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
val = __vgic_mmio_read_active(vcpu, addr, len);
vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
return val;
}
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_cactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_sactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
* @kvm: Pointer to the VM being initialized
*
* We may be called each time a vITS is created, or when the
- * vgic is initialized. This relies on kvm->lock to be
- * held. In both cases, the number of vcpus should now be
- * fixed.
+ * vgic is initialized. In both cases, the number of vcpus
+ * should now be fixed.
*/
int vgic_v4_init(struct kvm *kvm)
{
int nr_vcpus, ret;
unsigned long i;
+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
/**
* vgic_v4_teardown - Free the GICv4 data structures
* @kvm: Pointer to the VM being destroyed
- *
- * Relies on kvm->lock to be held.
*/
void vgic_v4_teardown(struct kvm *kvm)
{
struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
int i;
+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!its_vm->vpes)
return;
/*
* Locking order is always:
* kvm->lock (mutex)
- * its->cmd_lock (mutex)
- * its->its_lock (mutex)
- * vgic_cpu->ap_list_lock must be taken with IRQs disabled
- * kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vcpu->mutex (mutex)
+ * kvm->arch.config_lock (mutex)
+ * its->cmd_lock (mutex)
+ * its->its_lock (mutex)
+ * vgic_cpu->ap_list_lock must be taken with IRQs disabled
+ * kvm->lpi_list_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower than it.
return 0;
}
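The hierarchy above is the order the converted paths in this series now follow. As a minimal sketch, a config-path helper (hypothetical name, mirroring the attr-handler hunks earlier in this series) acquires and releases the mutexes like so:

	static int example_vgic_config_op(struct kvm *kvm)
	{
		int ret = -EBUSY;

		mutex_lock(&kvm->lock);
		if (!lock_all_vcpus(kvm)) {	/* takes every vcpu->mutex */
			mutex_unlock(&kvm->lock);
			return ret;
		}
		mutex_lock(&kvm->arch.config_lock);

		/* ... mutate vgic/ITS state; its->cmd_lock and its->its_lock
		 * may only be taken below this point ... */
		ret = 0;

		mutex_unlock(&kvm->arch.config_lock);
		unlock_all_vcpus(kvm);
		mutex_unlock(&kvm->lock);
		return ret;
	}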
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+ unsigned long flags;
+ int ret = -1;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw)
+ ret = irq->hwintid;
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ vgic_put_irq(vcpu->kvm, irq);
+ return ret;
+}
+
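kvm_vgic_get_map() returns the mapped hwintid, or -1 when the vINTID is purely virtual. For illustration, a trivial wrapper (hypothetical name) built on just that contract:

	static bool example_vintid_is_hw(struct kvm_vcpu *vcpu, unsigned int vintid)
	{
		/* -1 means irq->hw was false, i.e. no physical backing. */
		return kvm_vgic_get_map(vcpu, vintid) >= 0;
	}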
/**
* kvm_vgic_set_owner - Set the owner of an interrupt for a VM
*
void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
HAS_DIT
HAS_E0PD
HAS_ECV
+HAS_ECV_CNTPOFF
HAS_EPAN
HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
Fields CONTEXTIDR_ELx
EndSysreg
+Sysreg CNTPOFF_EL2 3 4 14 0 6
+Field 63:0 PhysicalOffset
+EndSysreg
+
Sysreg CPACR_EL12 3 5 1 0 2
Fields CPACR_ELx
EndSysreg
int (*vcpu_run)(struct kvm_vcpu *vcpu);
void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
-extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
int kvm_mips_emulation_init(void);
/* Debug: dump vcpu state */
};
/* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
-struct kvm_mips_callbacks *kvm_mips_callbacks = &kvm_vz_callbacks;
+const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
int kvm_mips_emulation_init(void)
{
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)
#define _GLOBAL_KASAN(fn) _GLOBAL(__##fn)
#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(__##fn)
#define EXPORT_SYMBOL_KASAN(fn) EXPORT_SYMBOL(__##fn)
extern void * memchr(const void *,int,__kernel_size_t);
void memcpy_flushcache(void *dest, const void *src, size_t size);
+#ifdef CONFIG_KASAN
+/* __mem variants are used by KASAN to implement instrumented memintrinsics. */
+#ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+#define __memset memset
+#define __memcpy memcpy
+#define __memmove memmove
+#else /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
void *__memset(void *s, int c, __kernel_size_t count);
void *__memcpy(void *to, const void *from, __kernel_size_t n);
void *__memmove(void *to, const void *from, __kernel_size_t n);
-
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#ifndef __SANITIZE_ADDRESS__
/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
-
-#endif
+#endif /* !__SANITIZE_ADDRESS__ */
+#endif /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
+#endif /* CONFIG_KASAN */
#ifdef CONFIG_PPC64
#ifndef CONFIG_KASAN
# If you really need to reference something from prom_init.o add
# it to the list below:
-grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
-if [ $? -eq 0 ]
+has_renamed_memintrinsics()
+{
+ grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
+ ! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
+}
+
+if has_renamed_memintrinsics
then
MEM_FUNCS="__memcpy __memset"
else
}
/*
- * Check for a read fault. This could be caused by a read on an
- * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
+ * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
+ * defined in protection_map[]. Read faults can only be caused by
+ * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
*/
- if (unlikely(!(vma->vm_flags & VM_READ)))
+ if (unlikely(!vma_is_accessible(vma)))
return true;
+
+ if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
+ return true;
+
/*
* We should ideally do the vma pkey access check here. But in the
* fault path, handle_mm_fault() also does the same check. To avoid
select OF_DYNAMIC
select FORCE_PCI
select PCI_MSI
+ select GENERIC_ALLOCATOR
select PPC_XICS
select PPC_XIVE_SPAPR
select PPC_ICP_NATIVE
depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
+config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ def_bool y
+ # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
+ depends on AS_IS_GNU && AS_VERSION >= 23800
+ help
+ Newer binutils versions default to ISA spec version 20191213 which
+ moves some instructions from the I extension to the Zicsr and Zifencei
+ extensions.
+
+config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+ def_bool y
+ depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
+ depends on CC_IS_CLANG && CLANG_VERSION < 170000
+ help
+ Certain versions of clang do not support zicsr and zifencei via -march
+ but newer versions of binutils require it for the reasons noted in the
+ help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
+ option causes an older ISA spec compatible with these older versions
+ of clang to be passed to GAS, which has the same result as passing zicsr
+ and zifencei to -march.
+
config FPU
bool "FPU support"
default y
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
-# Newer binutils versions default to ISA spec version 20191213 which moves some
-# instructions from the I extension to the Zicsr and Zifencei extensions.
-toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
-riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+KBUILD_CFLAGS += -Wa,-misa-spec=2.2
+KBUILD_AFLAGS += -Wa,-misa-spec=2.2
+else
+riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
+endif
# Check if the toolchain supports Zihintpause extension
riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
- /* A local tlb flush is needed before user execution can resume. */
- cpumask_t tlb_stale_mask;
#endif
} mm_context_t;
#include <asm/errata_list.h>
#ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
- __asm__ __volatile__ ("sfence.vma x0, %0"
- :
- : "r" (asid)
- : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
- unsigned long asid)
-{
- __asm__ __volatile__ ("sfence.vma %0, %1"
- :
- : "r" (addr), "r" (asid)
- : "memory");
-}
-
#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#define local_flush_tlb_page(addr) do { } while (0)
static unsigned long asid_bits;
static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
static atomic_long_t current_version;
if (need_flush_tlb)
local_flush_tlb_all();
-#ifdef CONFIG_SMP
- else {
- cpumask_t *mask = &mm->context.tlb_stale_mask;
-
- if (cpumask_test_cpu(cpu, mask)) {
- cpumask_clear_cpu(cpu, mask);
- local_flush_tlb_all_asid(cntx & asid_mask);
- }
- }
-#endif
}
static void set_mm_noasid(struct mm_struct *mm)
local_flush_tlb_all();
}
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+ struct mm_struct *next, unsigned int cpu)
{
- if (static_branch_unlikely(&use_asid_allocator))
- set_mm_asid(mm, cpu);
- else
- set_mm_noasid(mm);
+ /*
+ * The mm_cpumask indicates which harts' TLBs contain the virtual
+ * address mapping of the mm. Compared to noasid, using asid
+ * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush the TLB on every switch_mm,
+	 * for performance. So when using asid, keep the footprints of all
+	 * CPUs in mm_cpumask() until the mm is reset.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ if (static_branch_unlikely(&use_asid_allocator)) {
+ set_mm_asid(next, cpu);
+ } else {
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ set_mm_noasid(next);
+ }
}
static int __init asids_init(void)
}
early_initcall(asids_init);
#else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+ struct mm_struct *next, unsigned int cpu)
{
/* Nothing to do here when there is no MMU */
}
*/
cpu = smp_processor_id();
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
-
- set_mm(next, cpu);
+ set_mm(prev, next, cpu);
flush_icache_deferred(next, cpu);
}
no_context(regs, addr);
return;
}
+ if (pud_leaf(*pud_k))
+ goto flush_tlb;
/*
* Since the vmalloc area is global, it is unnecessary
no_context(regs, addr);
return;
}
+ if (pmd_leaf(*pmd_k))
+ goto flush_tlb;
/*
* Make sure the actual PTE exists as well to
* ordering constraint, not a cache flush; it is
* necessary even after writing invalid entries.
*/
+flush_tlb:
local_flush_tlb_page(addr);
}
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma x0, %0"
+ :
+ : "r" (asid)
+ : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+ unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma %0, %1"
+ :
+ : "r" (addr), "r" (asid)
+ : "memory");
+}
void flush_tlb_all(void)
{
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
- struct cpumask *pmask = &mm->context.tlb_stale_mask;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
if (static_branch_unlikely(&use_asid_allocator)) {
- unsigned long asid = atomic_long_read(&mm->context.id);
-
- /*
- * TLB will be immediately flushed on harts concurrently
- * executing this MM context. TLB flush on other harts
- * is deferred until this MM context migrates there.
- */
- cpumask_setall(pmask);
- cpumask_clear_cpu(cpuid, pmask);
- cpumask_andnot(pmask, pmask, cmask);
+ unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
if (broadcast) {
sbi_remote_sfence_vma_asid(cmask, start, size, asid);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
intersects(initrd_data.start, initrd_data.size, safe_addr, size))
safe_addr = initrd_data.start + initrd_data.size;
+ if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
+ safe_addr = (unsigned long)comps + comps->len;
+ goto repeat;
+ }
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
+ if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
+ safe_addr = (unsigned long)certs + certs->len;
+ goto repeat;
+ }
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
CONFIG_NET_SCH_MQPRIO=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_TEST_LOCKUP=m
+CONFIG_DEBUG_PREEMPT=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
+CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
+CONFIG_SAMPLE_FTRACE_OPS=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
CONFIG_NET_SCH_MQPRIO=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
+CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
+CONFIG_SAMPLE_FTRACE_OPS=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
# CONFIG_VMCP is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
- return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+ return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
}
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gi->timer.function = gisa_vcpu_kicker;
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
- gi->origin->next_alert = (u32)(u64)gi->origin;
+ gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}
return -EINVAL;
aift->sbv = zpci_aif_sbv;
- aift->gait = (struct zpci_gaite *)zpci_aipb->aipb.gait;
+ aift->gait = phys_to_virt(zpci_aipb->aipb.gait);
return 0;
}
}
/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
- unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+ unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
{
struct kvm_s390_apcb0 tmp;
+ unsigned long apcb_gpa;
- if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+ apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+ if (read_guest_real(vcpu, apcb_gpa, &tmp,
+ sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
* setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original apcb in the guest2
+ * @crycb_gpa: guest physical address to start of original guest crycb
* @apcb_h: pointer to start of apcb in the guest1
*
* Returns 0 and -EFAULT on error reading guest apcb
*/
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
- unsigned long apcb_o, unsigned long *apcb_h)
+ unsigned long crycb_gpa, unsigned long *apcb_h)
{
- if (read_guest_real(vcpu, apcb_o, apcb_s,
+ unsigned long apcb_gpa;
+
+ apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+ if (read_guest_real(vcpu, apcb_gpa, apcb_s,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
* setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original guest apcb
+ * @crycb_gpa: guest physical address to start of original guest crycb
* @apcb_h: pointer to start of apcb in the host
*
* Returns 0 and -EFAULT on error reading guest apcb
*/
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
- unsigned long apcb_o,
+ unsigned long crycb_gpa,
unsigned long *apcb_h)
{
- if (read_guest_real(vcpu, apcb_o, apcb_s,
+ unsigned long apcb_gpa;
+
+ apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
+
+ if (read_guest_real(vcpu, apcb_gpa, apcb_s,
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
* setup_apcb - Create a shadow copy of the apcb.
* @vcpu: pointer to the virtual CPU
* @crycb_s: pointer to shadow crycb
- * @crycb_o: pointer to original guest crycb
+ * @crycb_gpa: guest physical address of original guest crycb
* @crycb_h: pointer to the host crycb
* @fmt_o: format of the original guest crycb.
* @fmt_h: format of the host crycb.
* Return 0 or an error number if the guest and host crycb are incompatible.
*/
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
- const u32 crycb_o,
+ const u32 crycb_gpa,
struct kvm_s390_crypto_cb *crycb_h,
int fmt_o, int fmt_h)
{
- struct kvm_s390_crypto_cb *crycb;
-
- crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
-
switch (fmt_o) {
case CRYCB_FORMAT2:
- if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+ if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
return -EACCES;
if (fmt_h != CRYCB_FORMAT2)
return -EINVAL;
return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
- (unsigned long) &crycb->apcb1,
+ crycb_gpa,
(unsigned long *)&crycb_h->apcb1);
case CRYCB_FORMAT1:
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
- (unsigned long) &crycb->apcb0,
+ crycb_gpa,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
- (unsigned long) &crycb->apcb0,
+ crycb_gpa,
(unsigned long *) &crycb_h->apcb0);
}
break;
case CRYCB_FORMAT0:
- if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+ if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
return -EACCES;
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
- (unsigned long) &crycb->apcb0,
+ crycb_gpa,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
case CRYCB_FORMAT0:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
- (unsigned long) &crycb->apcb0,
+ crycb_gpa,
(unsigned long *) &crycb_h->apcb0);
}
}
return r;
}
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
- struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
unsigned long addr, size, flags;
struct resource *res;
return -ENOMEM;
}
zdev->bars[i].res = res;
- pci_add_resource(resources, res);
}
zdev->has_resources = 1;
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
+ struct resource *res;
int i;
+ pci_lock_rescan_remove();
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (!zdev->bars[i].size || !zdev->bars[i].res)
+ res = zdev->bars[i].res;
+ if (!res)
continue;
+ release_resource(res);
+ pci_bus_remove_resource(zdev->zbus->bus, res);
zpci_free_iomap(zdev, zdev->bars[i].map_idx);
- release_resource(zdev->bars[i].res);
- kfree(zdev->bars[i].res);
+ zdev->bars[i].res = NULL;
+ kfree(res);
}
zdev->has_resources = 0;
+ pci_unlock_rescan_remove();
}
int pcibios_device_add(struct pci_dev *pdev)
*/
static int zpci_bus_prepare_device(struct zpci_dev *zdev)
{
- struct resource_entry *window, *n;
- struct resource *res;
- int rc;
+ int rc, i;
if (!zdev_enabled(zdev)) {
rc = zpci_enable_device(zdev);
}
if (!zdev->has_resources) {
- zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
- resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
- res = window->res;
- pci_bus_add_resource(zdev->zbus->bus, res, 0);
+ zpci_setup_bus_resources(zdev);
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (zdev->bars[i].res)
+ pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
}
}
int zpci_alloc_domain(int domain);
void zpci_free_domain(int domain);
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
- struct list_head *resources);
+int zpci_setup_bus_resources(struct zpci_dev *zdev);
static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
unsigned int devfn)
/* Event overflow */
handled++;
+ status &= ~mask;
perf_sample_data_init(&data, 0, hwc->last_period);
if (!x86_perf_event_set_period(event))
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
-
- status &= ~mask;
}
/*
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
+/*
+ * KVM previously used a u32 field in kvm_run to indicate the hypercall was
+ * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the
+ * remaining 31 lower bits must be 0 to preserve ABI.
+ */
+#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)
+
#endif /* _ASM_X86_KVM_HOST_H */
struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
} __packed;
-/* Guest message request error code */
+/* Guest message request error codes */
#define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32)
+#define SNP_GUEST_REQ_ERR_BUSY BIT_ULL(33)
#define GHCB_MSR_TERM_REQ 0x100
#define GHCB_MSR_TERM_REASON_SET_POS 12
* Sub-leaf 2: EAX: host tsc frequency in kHz
*/
+#define XEN_CPUID_TSC_EMULATED (1u << 0)
+#define XEN_CPUID_HOST_TSC_RELIABLE (1u << 1)
+#define XEN_CPUID_RDTSCP_INSTR_AVAIL (1u << 2)
+
+#define XEN_CPUID_TSC_MODE_DEFAULT (0)
+#define XEN_CPUID_TSC_MODE_ALWAYS_EMULATE (1u)
+#define XEN_CPUID_TSC_MODE_NEVER_EMULATE (2u)
+#define XEN_CPUID_TSC_MODE_PVRDTSCP (3u)
+
/*
* Leaf 5 (0x40000x04)
* HVM-specific features
* Sub-leaf 0: EAX: Features
* Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
*/
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
/*
- * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
- * used to store high bits for the Destination ID. This expands the Destination
- * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768.
+ * With interrupt format set to 0 (non-remappable) bits 55:49 from the
+ * IO-APIC RTE and bits 11:5 from the MSI address can be used to store
+ * high bits for the Destination ID. This expands the Destination ID
+ * field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
*/
#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
-/* Per-vCPU event channel upcalls */
+/*
+ * Per-vCPU event channel upcalls work correctly with physical IRQs
+ * bound to event channels.
+ */
#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)
/*
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
+/* x86-specific KVM_EXIT_HYPERCALL flags. */
+#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
+
#endif /* _ASM_X86_KVM_H */
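Because bit 0 aliases the old u32 longmode field and bits 31:1 are guaranteed zero, userspace can test the flag directly without version checks. A minimal userspace sketch, assuming only the UAPI additions shown here:

	#include <linux/kvm.h>
	#include <stdbool.h>

	static bool hypercall_in_long_mode(const struct kvm_run *run)
	{
		/* Bits 31:1 of flags are MBZ, so a plain mask test is ABI-safe. */
		return run->hypercall.flags & KVM_EXIT_HYPERCALL_LONG_MODE;
	}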
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
+ mce_schedule_work();
}
/* Toggle features for corrected errors */
{
struct resctrl_schema *s;
struct rdtgroup *rdtgrp;
- struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
int ret = 0;
goto out;
}
- list_for_each_entry(s, &resctrl_schema_all, list) {
- list_for_each_entry(dom, &s->res->domains, list)
- memset(dom->staged_config, 0, sizeof(dom->staged_config));
- }
+ rdt_staged_configs_clear();
while ((tok = strsep(&buf, "\n")) != NULL) {
resname = strim(strsep(&tok, ":"));
}
out:
+ rdt_staged_configs_clear();
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return ret ?: nbytes;
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
void __init thread_throttle_mode_init(void);
void __init mbm_config_rftype_init(const char *config);
+void rdt_staged_configs_clear(void);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
va_end(ap);
}
+void rdt_staged_configs_clear(void)
+{
+ struct rdt_resource *r;
+ struct rdt_domain *dom;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ for_each_alloc_capable_rdt_resource(r) {
+ list_for_each_entry(dom, &r->domains, list)
+ memset(dom->staged_config, 0, sizeof(dom->staged_config));
+ }
+}
+
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
{
struct resctrl_schema *s;
struct rdt_resource *r;
- int ret;
+ int ret = 0;
+
+ rdt_staged_configs_clear();
list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;
} else {
ret = rdtgroup_init_cat(s, rdtgrp->closid);
if (ret < 0)
- return ret;
+ goto out;
}
ret = resctrl_arch_update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
- return ret;
+ goto out;
}
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
- return 0;
+out:
+ rdt_staged_configs_clear();
+ return ret;
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
zerofrom = offsetof(struct xregs_state, extended_state_area);
/*
- * The ptrace buffer is in non-compacted XSAVE format. In
- * non-compacted format disabled features still occupy state space,
- * but there is no state to copy from in the compacted
- * init_fpstate. The gap tracking will zero these states.
- */
- mask = fpstate->user_xfeatures;
-
- /*
- * Dynamic features are not present in init_fpstate. When they are
- * in an all zeros init state, remove those from 'mask' to zero
- * those features in the user buffer instead of retrieving them
- * from init_fpstate.
+ * This 'mask' indicates which states to copy from fpstate.
+ * Those extended states that are not present in fpstate are
+ * either disabled or initialized:
+ *
+ * In non-compacted format, disabled features still occupy
+ * state space but there is no state to copy from in the
+ * compacted init_fpstate. The gap tracking will zero these
+ * states.
+ *
+ * The extended features have an all zeroes init state. Thus,
+ * remove them from 'mask' to zero those features in the user
+ * buffer instead of retrieving them from init_fpstate.
*/
- if (fpu_state_size_dynamic())
- mask &= (header.xfeatures | xinit->header.xcomp_bv);
+ mask = header.xfeatures;
for_each_extended_xfeature(i, mask) {
/*
pkru.pkru = pkru_val;
membuf_write(&to, &pkru, sizeof(pkru));
} else {
- copy_feature(header.xfeatures & BIT_ULL(i), &to,
+ membuf_write(&to,
__raw_xsave_addr(xsave, i),
- __raw_xsave_addr(xinit, i),
xstate_sizes[i]);
}
/*
RET
SYM_FUNC_END(ftrace_stub)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(ftrace_stub_graph)
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
struct ghcb *ghcb;
int ret;
- if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
- return -ENODEV;
-
if (!fw_err)
return -EINVAL;
if (ret)
goto e_put;
- if (ghcb->save.sw_exit_info_2) {
- /* Number of expected pages are returned in RBX */
- if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
- ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
- input->data_npages = ghcb_get_rbx(ghcb);
+ *fw_err = ghcb->save.sw_exit_info_2;
+ switch (*fw_err) {
+ case 0:
+ break;
- *fw_err = ghcb->save.sw_exit_info_2;
+ case SNP_GUEST_REQ_ERR_BUSY:
+ ret = -EAGAIN;
+ break;
+ case SNP_GUEST_REQ_INVALID_LEN:
+ /* Number of expected pages are returned in RBX */
+ if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+ input->data_npages = ghcb_get_rbx(ghcb);
+ ret = -ENOSPC;
+ break;
+ }
+ fallthrough;
+ default:
ret = -EIO;
+ break;
}
e_put:
return ret;
}
-/*
- * This one is tied to SSB in the user API, and not
- * visible in /proc/cpuinfo.
- */
-#define KVM_X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
-
#define F feature_bit
/* Scattered Flag - For features that are scattered by cpufeatures.h. */
/* Update OSXSAVE bit */
if (boot_cpu_has(X86_FEATURE_XSAVE))
cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
- kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
+ kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
cpuid_entry_change(best, X86_FEATURE_APIC,
vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
best = cpuid_entry2_find(entries, nent, 7, 0);
if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
cpuid_entry_change(best, X86_FEATURE_OSPKE,
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+ kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
best = cpuid_entry2_find(entries, nent, 0xD, 0);
if (best)
F(CLZERO) | F(XSAVEERPTR) |
F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
- __feature_bit(KVM_X86_FEATURE_AMD_PSFD)
+ F(AMD_PSFD)
);
/*
goto exception;
break;
case VCPU_SREG_CS:
+ /*
+ * KVM uses "none" when loading CS as part of emulating Real
+ * Mode exceptions and IRET (handled above). In all other
+ * cases, loading CS without a control transfer is a KVM bug.
+ */
+ if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
+ goto exception;
+
if (!(seg_desc.type & 8))
goto exception;
#include <linux/kvm_host.h>
-#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
return vcpu->arch.cr0 & mask;
}
+static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
+ unsigned long cr0_bit)
+{
+ BUILD_BUG_ON(!is_power_of_2(cr0_bit));
+
+ return !!kvm_read_cr0_bits(vcpu, cr0_bit);
+}
+
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, ~0UL);
return vcpu->arch.cr4 & mask;
}
+static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
+ unsigned long cr4_bit)
+{
+ BUILD_BUG_ON(!is_power_of_2(cr4_bit));
+
+ return !!kvm_read_cr4_bits(vcpu, cr4_bit);
+}
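As a usage note, the BUILD_BUG_ON() restricts these helpers to compile-time single-bit masks; anything wider still goes through kvm_read_cr0_bits()/kvm_read_cr4_bits(). A brief sketch under that assumption:

	static bool example_pcid_enabled(struct kvm_vcpu *vcpu)
	{
		/* OK: exactly one bit set in the constant mask. */
		return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
		/* kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE | X86_CR4_PAE)
		 * would trip the BUILD_BUG_ON() at compile time. */
	}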
+
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
{
BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
- return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
? cr3 & X86_CR3_PCID_MASK
: 0;
}
vcpu->arch.mmu->root_role.level);
}
+static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
+{
+ /*
+ * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
+ * @mmu's snapshot of CR0.WP and thus all related paging metadata may
+ * be stale. Refresh CR0.WP and the metadata on-demand when checking
+ * for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing
+ * nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does
+ * need to refresh nested_mmu, a.k.a. the walker used to translate L2
+ * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
+ */
+ if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
+ return;
+
+ __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+}
+
/*
* Check if a given access (described through the I/D, W/R and U/S bits of a
* page fault error code pfec) causes a permission fault with the given PTE
u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
- bool fault = (mmu->permissions[index] >> pte_access) & 1;
u32 errcode = PFERR_PRESENT_MASK;
+ bool fault;
+
+ kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+
+ fault = (mmu->permissions[index] >> pte_access) & 1;
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
if (unlikely(mmu->pkru_mask)) {
return role;
}
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
+{
+ const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
+
+ BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
+ BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
+
+ if (is_cr0_wp(mmu) == cr0_wp)
+ return;
+
+ mmu->cpu_role.base.cr0_wp = cr0_wp;
+ reset_guest_paging_metadata(vcpu, mmu);
+}
+
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
/* tdp_root_level is architecture forced level, use it if nonzero */
if (!pmc)
return 1;
- if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
+ if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
(static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
- (kvm_read_cr0(vcpu) & X86_CR0_PE))
+ kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
return 1;
*data = pmc_read_counter(pmc) & mask;
#endif
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
+ { .index = MSR_IA32_FLUSH_CMD, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
return 0;
}
-static int svm_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, struct msr_data *msr,
- bool guest_has_feat, u64 cmd,
- int x86_feature_bit)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- if (!msr->host_initiated && !guest_has_feat)
- return 1;
-
- if (!(msr->data & ~cmd))
- return 1;
- if (!boot_cpu_has(x86_feature_bit))
- return 1;
- if (!msr->data)
- return 0;
-
- wrmsrl(msr->index, cmd);
- set_msr_interception(vcpu, svm->msrpm, msr->index, 0, 1);
-
- return 0;
-}
-
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct vcpu_svm *svm = to_svm(vcpu);
- int r;
+ int ret = 0;
u32 ecx = msr->index;
u64 data = msr->data;
*/
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
break;
- case MSR_IA32_PRED_CMD:
- r = svm_set_msr_ia32_cmd(vcpu, msr,
- guest_has_pred_cmd_msr(vcpu),
- PRED_CMD_IBPB, X86_FEATURE_IBPB);
- break;
- case MSR_IA32_FLUSH_CMD:
- r = svm_set_msr_ia32_cmd(vcpu, msr,
- guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
- L1D_FLUSH, X86_FEATURE_FLUSH_L1D);
- break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
* guest via direct_access_msrs, and switch it via user return.
*/
preempt_disable();
- r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
+ ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
preempt_enable();
- if (r)
- return 1;
+ if (ret)
+ break;
svm->tsc_aux = data;
break;
default:
return kvm_set_msr_common(vcpu, msr);
}
- return 0;
+ return ret;
}
static int msr_interception(struct kvm_vcpu *vcpu)
svm_recalc_instruction_intercepts(vcpu, svm);
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
+ !!guest_has_pred_cmd_msr(vcpu));
+
+ if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
+ !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+
/* For sev guests, the memory encryption bit is not reserved in CR3. */
if (sev_guest(vcpu->kvm)) {
best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
void *insn, int insn_len)
{
bool smep, smap, is_user;
- unsigned long cr4;
u64 error_code;
/* Emulation is always possible when KVM has access to all guest state. */
if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
goto resume_guest;
- cr4 = kvm_read_cr4(vcpu);
- smep = cr4 & X86_CR4_SMEP;
- smap = cr4 & X86_CR4_SMAP;
+ smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
+ smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
is_user = svm_get_cpl(vcpu) == 3;
if (smap && (!smep || is_user)) {
pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
* CR0_GUEST_HOST_MASK is already set in the original vmcs01
* (KVM doesn't change it);
*/
- vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+ vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmx_set_cr0(vcpu, vmcs12->host_cr0);
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
*/
vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
- vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+ vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
* does force CR0.PE=1, but only to also force VM86 in order to emulate
* Real Mode, and so there's no need to check CR0.PE manually.
*/
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+ if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
MSR_IA32_SPEC_CTRL,
MSR_IA32_PRED_CMD,
+ MSR_IA32_FLUSH_CMD,
MSR_IA32_TSC,
#ifdef CONFIG_X86_64
MSR_FS_BASE,
return debugctl;
}
-static int vmx_set_msr_ia32_cmd(struct kvm_vcpu *vcpu,
- struct msr_data *msr_info,
- bool guest_has_feat, u64 cmd,
- int x86_feature_bit)
-{
- if (!msr_info->host_initiated && !guest_has_feat)
- return 1;
-
- if (!(msr_info->data & ~cmd))
- return 1;
- if (!boot_cpu_has(x86_feature_bit))
- return 1;
- if (!msr_info->data)
- return 0;
-
- wrmsrl(msr_info->index, cmd);
-
- /*
- * For non-nested:
- * When it's written (to non-zero) for the first time, pass
- * it through.
- *
- * For nested:
- * The handling of the MSR bitmap for L2 guests is done in
- * nested_vmx_prepare_msr_bitmap. We should not touch the
- * vmcs02.msr_bitmap here since it gets completely overwritten
- * in the merging.
- */
- vmx_disable_intercept_for_msr(vcpu, msr_info->index, MSR_TYPE_W);
-
- return 0;
-}
-
/*
* Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
return 1;
goto find_uret_msr;
- case MSR_IA32_PRED_CMD:
- ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
- guest_has_pred_cmd_msr(vcpu),
- PRED_CMD_IBPB,
- X86_FEATURE_IBPB);
- break;
- case MSR_IA32_FLUSH_CMD:
- ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
- guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
- L1D_FLUSH,
- X86_FEATURE_FLUSH_L1D);
- break;
case MSR_IA32_CR_PAT:
if (!kvm_pat_valid(data))
return 1;
/* 22.2.1, 20.8.1 */
vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
- vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+ vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
set_cr4_guest_host_mask(vmx);
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
return true;
- return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+ return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
}
break;
case 3: /* lmsw */
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
- trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+ trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
kvm_lmsw(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
- if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
+ if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cache = MTRR_TYPE_WRBACK;
else
vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
!guest_cpuid_has(vcpu, X86_FEATURE_XFD));
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
+ !guest_has_pred_cmd_msr(vcpu));
+
+ if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
+ !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
set_cr4_guest_host_mask(vmx);
struct lbr_desc lbr_desc;
/* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS 15
+#define MAX_POSSIBLE_PASSTHROUGH_MSRS 16
struct {
DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
(1 << VCPU_EXREG_EXIT_INFO_1) | \
(1 << VCPU_EXREG_EXIT_INFO_2))
+static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
+{
+ unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+
+ /*
+ * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
+ * in order to construct shadow PTEs with the correct protections.
+ * Note! CR0.WP technically can be passed through to the guest if
+ * paging is disabled, but checking CR0.PG would generate a cyclical
+ * dependency of sorts due to forcing the caller to ensure CR0 holds
+ * the correct value prior to determining which CR0 bits can be owned
+ * by L1. Keep it simple and limit the optimization to EPT.
+ */
+ if (!enable_ept)
+ bits &= ~X86_CR0_WP;
+ return bits;
+}
+
static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
return container_of(kvm, struct kvm_vmx, kvm);
module_param(eager_page_split, bool, 0644);
/* Enable/disable SMT_RSB bug mitigation */
-bool __read_mostly mitigate_smt_rsb;
+static bool __read_mostly mitigate_smt_rsb;
module_param(mitigate_smt_rsb, bool, 0444);
/*
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
- if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
return true;
kvm_queue_exception(vcpu, UD_VECTOR);
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
+ /*
+ * CR0.WP is incorporated into the MMU role, but only for non-nested,
+ * indirect shadow MMUs. If paging is disabled, no updates are needed
+ * as there are no permission bits to emulate. If TDP is enabled, the
+ * MMU's metadata needs to be updated, e.g. so that emulating guest
+ * translations does the right thing, but there's no need to unload the
+ * root as CR0.WP doesn't affect SPTEs.
+ */
+ if ((cr0 ^ old_cr0) == X86_CR0_WP) {
+ if (!(cr0 & X86_CR0_PG))
+ return;
+
+ if (tdp_enabled) {
+ kvm_init_mmu(vcpu);
+ return;
+ }
+ }
+
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 1;
if (!(cr0 & X86_CR0_PG) &&
- (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
+ (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
return 1;
static_call(kvm_x86_set_cr0)(vcpu, cr0);
if (vcpu->arch.guest_state_protected)
return;
- if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+ if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
if (static_cpu_has(X86_FEATURE_PKU) &&
vcpu->arch.pkru != vcpu->arch.host_pkru &&
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+ kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (static_cpu_has(X86_FEATURE_PKU) &&
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
+ kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
vcpu->arch.pkru = rdpkru();
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
write_pkru(vcpu->arch.host_pkru);
}
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
- if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+ if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
- return 1;
-
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
return 1;
* PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
* with PCIDE=0.
*/
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+ if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
return;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
bool skip_tlb_flush = false;
unsigned long pcid = 0;
#ifdef CONFIG_X86_64
- bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
- if (pcid_enabled) {
+ if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
cr3 &= ~X86_CR3_PCID_NOFLUSH;
pcid = cr3 & X86_CR3_PCID_MASK;
vcpu->arch.perf_capabilities = data;
kvm_pmu_refresh(vcpu);
return 0;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
+ return 1;
+
+ if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
+ return 1;
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+ break;
+ case MSR_IA32_FLUSH_CMD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))
+ return 1;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
+ return 1;
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ break;
case MSR_EFER:
return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
- !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
+ !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 0;
}
vcpu->run->hypercall.args[0] = gpa;
vcpu->run->hypercall.args[1] = npages;
vcpu->run->hypercall.args[2] = attrs;
- vcpu->run->hypercall.longmode = op_64_bit;
+ vcpu->run->hypercall.flags = 0;
+ if (op_64_bit)
+ vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
+
+ WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
vcpu->arch.complete_userspace_io = complete_hypercall_exit;
return 0;
}
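
On the other side of this exit, a VMM now tests the flags word instead of the old longmode byte. A hedged user-space sketch, assuming KVM_EXIT_HYPERCALL_LONG_MODE is bit 0 and a kvm_run-like layout (both taken on trust from the UAPI, not defined here):

	/* Hypothetical VMM-side helper; the struct is a stand-in for
	 * kvm_run->hypercall and the flag bit value is an assumption. */
	#include <stdbool.h>
	#include <stdint.h>

	#define KVM_EXIT_HYPERCALL_LONG_MODE (1ULL << 0)   /* assumed bit 0 */

	struct hypercall_exit {
		uint64_t nr;
		uint64_t args[6];
		uint64_t ret;
		uint64_t flags;
	};

	static bool hypercall_in_long_mode(const struct hypercall_exit *hc)
	{
		return hc->flags & KVM_EXIT_HYPERCALL_LONG_MODE;
	}
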
return 1;
}
- pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+ pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
switch (type) {
case INVPCID_TYPE_INDIV_ADDR:
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+ return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
+static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
- return vcpu->arch.efer & EFER_LMA;
+ return !!(vcpu->arch.efer & EFER_LMA);
#else
- return 0;
+ return false;
#endif
}
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
-static inline int is_pae(struct kvm_vcpu *vcpu)
+static inline bool is_pae(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}
-static inline int is_pse(struct kvm_vcpu *vcpu)
+static inline bool is_pse(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}
-static inline int is_paging(struct kvm_vcpu *vcpu)
+static inline bool is_paging(struct kvm_vcpu *vcpu)
{
- return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
+ return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}
static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
+#include <asm/setup.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
unsigned int max_cea;
unsigned int i, j;
+ if (!kaslr_enabled()) {
+ for_each_possible_cpu(i)
+ per_cpu(_cea_offset, i) = i;
+ return;
+ }
+
max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
/* O(sodding terrible) */
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
((u64)bp->ext_cmd_line_ptr << 32));
- cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+ if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+ return;
if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
sme_me_mask = me_mask;
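
The added early return matters because cmdline_find_option() leaves the output buffer untouched on failure, so the subsequent strncmp() against "on" would otherwise read uninitialized stack bytes. A user-space sketch of the same defensive pattern, with find_option() as a stand-in for the real parser:

	/* Sketch: never interpret the output buffer unless the lookup
	 * reported success. find_option() stands in for cmdline_find_option(). */
	#include <stdio.h>
	#include <string.h>

	static int find_option(const char *cmdline, const char *opt,
			       char *buf, int bufsize)
	{
		const char *p = strstr(cmdline, opt);

		if (!p)
			return -1;              /* not found: buf left untouched */
		snprintf(buf, bufsize, "%s", p + strlen(opt));
		return 0;
	}

	int main(void)
	{
		char buffer[16];                /* deliberately uninitialized */

		if (find_option("quiet splash", "mem_encrypt=", buffer,
				sizeof(buffer)) < 0)
			return 0;               /* bail out before reading buffer */

		if (!strncmp(buffer, "on", sizeof(buffer)))
			printf("enabled\n");
		return 0;
	}
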
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_PV_DOM0) += vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_XEN_EFI) += efi.o
x86_platform.set_legacy_features =
xen_dom0_set_legacy_features;
- xen_init_vga(info, xen_start_info->console.dom0.info_size);
+ xen_init_vga(info, xen_start_info->console.dom0.info_size,
+ &boot_params.screen_info);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
+
+ if (xen_initial_domain()) {
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_dom0_console,
+ };
+ int ret = HYPERVISOR_platform_op(&op);
+
+ if (ret > 0)
+ xen_init_vga(&op.u.dom0_console,
+ min(ret * sizeof(char),
+ sizeof(op.u.dom0_console)),
+ &boot_params->screen_info);
+ }
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <asm/xen/cpuid.h>
#include <xen/events.h>
#include <xen/features.h>
/* Leaf 4, sub-leaf 0 (0x40000x03) */
cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx);
- /* tsc_mode = no_emulate (2) */
- if (ebx != 2)
- return 0;
-
- return 1;
+ return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE;
}
static void __init xen_time_init(void)
#include "xen-ops.h"
-void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
+ struct screen_info *screen_info)
{
- struct screen_info *screen_info = &boot_params.screen_info;
-
/* This is drawn from a dump from vgacon:startup in
* standard Linux. */
screen_info->orig_video_mode = 3;
struct dom0_vga_console_info;
-#ifdef CONFIG_XEN_PV_DOM0
-void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+#ifdef CONFIG_XEN_DOM0
+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
+ struct screen_info *);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
- size_t size)
+ size_t size, struct screen_info *si)
{
}
#endif
source "block/partitions/Kconfig"
-config BLOCK_COMPAT
- def_bool COMPAT
-
config BLK_MQ_PCI
def_bool PCI
}
}
-unsigned long bdev_start_io_acct(struct block_device *bdev,
- unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time)
{
- const int sgrp = op_stat_group(op);
-
part_stat_lock();
update_io_ticks(bdev, start_time, false);
- part_stat_inc(bdev, ios[sgrp]);
- part_stat_add(bdev, sectors[sgrp], sectors);
part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
part_stat_unlock();
*/
unsigned long bio_start_io_acct(struct bio *bio)
{
- return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
- bio_op(bio), jiffies);
+ return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
- unsigned long start_time)
+ unsigned int sectors, unsigned long start_time)
{
const int sgrp = op_stat_group(op);
unsigned long now = READ_ONCE(jiffies);
part_stat_lock();
update_io_ticks(bdev, now, true);
+ part_stat_inc(bdev, ios[sgrp]);
+ part_stat_add(bdev, sectors[sgrp], sectors);
part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
part_stat_unlock();
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
struct block_device *orig_bdev)
{
- bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
+ bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
struct blk_mq_hw_ctx *this_hctx = NULL;
struct blk_mq_ctx *this_ctx = NULL;
struct request *requeue_list = NULL;
+ struct request **requeue_lastp = &requeue_list;
unsigned int depth = 0;
LIST_HEAD(list);
this_hctx = rq->mq_hctx;
this_ctx = rq->mq_ctx;
} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
- rq_list_add(&requeue_list, rq);
+ rq_list_add_tail(&requeue_lastp, rq);
continue;
}
- list_add_tail(&rq->queuelist, &list);
+ list_add(&rq->queuelist, &list);
depth++;
} while (!rq_list_empty(plug->mq_list));
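
requeue_lastp above is the classic pointer-to-next-pointer tail cursor: appends become O(1) and the list keeps first-in order, which is what rq_list_add_tail() needs here. A standalone sketch of the idiom:

	/* Sketch of the tail-cursor idiom behind requeue_lastp: keep a
	 * pointer to the last 'next' field so appending never walks the list. */
	#include <stdio.h>
	#include <stdlib.h>

	struct node { int val; struct node *next; };

	static void add_tail(struct node ***lastp, struct node *n)
	{
		n->next = NULL;
		**lastp = n;            /* hook after the current tail */
		*lastp = &n->next;      /* advance the cursor to the new tail */
	}

	int main(void)
	{
		struct node *head = NULL, **lastp = &head;

		for (int i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));

			n->val = i;
			add_tail(&lastp, n);
		}
		for (struct node *n = head; n; n = n->next)
			printf("%d ", n->val);  /* prints 0 1 2: order preserved */
		printf("\n");
		return 0;
	}
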
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do { \
if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
+ struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
int srcu_idx; \
\
might_sleep_if(check_sleep); \
- srcu_idx = srcu_read_lock((q)->tag_set->srcu); \
+ srcu_idx = srcu_read_lock(__tag_set->srcu); \
(dispatch_ops); \
- srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
+ srcu_read_unlock(__tag_set->srcu, srcu_idx); \
} else { \
rcu_read_lock(); \
(dispatch_ops); \
}
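
Caching (q)->tag_set in __tag_set is single-evaluation hygiene: dispatch_ops may free the queue, so re-evaluating the macro argument for srcu_read_unlock() could dereference freed memory. The same rule in a generic, runnable form, with a pthread mutex standing in for the SRCU lock:

	/* Sketch: evaluate a macro argument once up front so the unlock does
	 * not re-read state that the body may have invalidated. */
	#include <pthread.h>
	#include <stdio.h>

	#define RUN_LOCKED(lockp, body) do {				\
		pthread_mutex_t *__lockp = (lockp); /* evaluated once */ \
		pthread_mutex_lock(__lockp);				\
		body;							\
		pthread_mutex_unlock(__lockp);				\
	} while (0)

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	int main(void)
	{
		int x = 0;

		RUN_LOCKED(&m, x++);
		printf("x = %d\n", x);
		return 0;
	}
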
if (sinfo->msgdigest_len != sig->digest_size) {
- pr_debug("Sig %u: Invalid digest size (%u)\n",
- sinfo->index, sinfo->msgdigest_len);
+ pr_warn("Sig %u: Invalid digest size (%u)\n",
+ sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
- pr_debug("Sig %u: Message digest doesn't match\n",
- sinfo->index);
+ pr_warn("Sig %u: Message digest doesn't match\n",
+ sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
const void *data, size_t datalen)
{
if (pkcs7->data) {
- pr_debug("Data already supplied\n");
+ pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;
break;
default:
- pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
+ pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
- pr_debug("Unsigned PE binary\n");
+ pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
- pr_debug("Signature wrapper too short\n");
+ pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
- /* Both pesign and sbsign round up the length of certificate table
- * (in optional header data directories) to 8 byte alignment.
+ /* sbsign rounds up the length of certificate table (in optional
+ * header data directories) to 8 byte alignment. However, the PE
+ * specification states that while entries are 8-byte aligned, this is
+ * not included in their length, and as a result, pesign has not
+ * rounded up since 0.110.
*/
- if (round_up(wrapper.length, 8) != ctx->sig_len) {
- pr_debug("Signature wrapper len wrong\n");
+ if (wrapper.length > ctx->sig_len) {
+ pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
+ ctx->sig_len, wrapper.length);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
- pr_debug("Signature is not revision 2.0\n");
+ pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
- pr_debug("Signature certificate type is not PKCS\n");
+ pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
- pr_debug("Signature data missing\n");
+ pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
return 0;
}
not_pkcs7:
- pr_debug("Signature data not PKCS#7\n");
+ pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
- pr_debug("Digest size mismatch (%zx != %x)\n",
- digest_size, ctx->digest_len);
+ pr_warn("Digest size mismatch (%zx != %x)\n",
+ digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
- pr_debug("Digest mismatch\n");
+ pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += habanalabs/
-obj-y += ivpu/
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
+obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
+ static bool is_pptt_checked;
acpi_status status;
/*
* PPTT will be used at runtime on every CPU hotplug in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
- if (!pptt) {
+ if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();
+
+ is_pptt_checked = true;
}
return pptt;
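
The is_pptt_checked flag converts the lookup into a once-only probe, so firmware without a PPTT is not re-queried (and re-warned about) on every CPU hotplug. The pattern reduced to a user-space sketch, with getenv() standing in for acpi_get_table():

	/* Sketch: probe once, cache the outcome even when it is negative, so
	 * a permanently absent resource is not re-queried on every call. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	static const char *get_table_once(void)
	{
		static const char *table;
		static bool checked;

		if (!table && !checked) {
			table = getenv("EXAMPLE_TABLE");  /* stand-in lookup */
			if (!table)
				fprintf(stderr, "table missing (warned once)\n");
			checked = true;
		}
		return table;                             /* may stay NULL */
	}

	int main(void)
	{
		get_table_once();  /* probes and may warn */
		get_table_once();  /* cached: no second probe, no second warning */
		return 0;
	}
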
if (acpi_disabled)
return 0;
+ if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER)) {
+ acpi_processor_cpufreq_init = true;
+ acpi_processor_ignore_ppc_init();
+ }
+
result = driver_register(&acpi_processor_driver);
if (result < 0)
return result;
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
- if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
- CPUFREQ_POLICY_NOTIFIER)) {
- acpi_processor_cpufreq_init = true;
- acpi_processor_ignore_ppc_init();
- }
-
acpi_processor_throttling_init();
return 0;
err:
ret = freq_qos_add_request(&policy->constraints,
&pr->thermal_req,
FREQ_QOS_MAX, INT_MAX);
- if (ret < 0)
+ if (ret < 0) {
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ continue;
+ }
+
+ thermal_cooling_device_update(pr->cdev);
}
}
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
- if (pr)
- freq_qos_remove_request(&pr->thermal_req);
+ if (!pr)
+ continue;
+
+ freq_qos_remove_request(&pr->thermal_req);
+
+ thermal_cooling_device_update(pr->cdev);
}
}
#else /* ! CONFIG_CPU_FREQ */
DMI_MATCH(DMI_BOARD_NAME, "M17T"),
},
},
+ {
+ .ident = "MEDION S17413",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
+ },
+ },
{ }
};
},
{
.callback = video_detect_force_native,
+ /* Acer Aspire 3830TG */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Acer Aspire 4810T */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
+ /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_UART1_TTY_UART2_SKIP |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Lenovo Yoga Book X90F/L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Lenovo Yoga Tablet 2 1050F/L */
DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Whitelabel (sold as various brands) TM800A550L */
return 0;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
+bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
+ return false;
+
+ quirks = (unsigned long)dmi_id->driver_data;
+ return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
#endif
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
{
struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+ ida_free(&pata_parport_bus_dev_ids, dev->id);
kfree(pi);
}
if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
return NULL;
+ id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
- if (!pi)
+ if (!pi) {
+ ida_free(&pata_parport_bus_dev_ids, id);
return NULL;
+ }
/* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
pi->dev.parent = &pata_parport_bus;
pi->dev.bus = &pata_parport_bus_type;
pi->dev.driver = &pr->driver;
pi->dev.release = pata_parport_dev_release;
- id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
- if (id < 0)
- return NULL; /* pata_parport_dev_release will do kfree(pi) */
pi->dev.id = id;
dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
if (device_register(&pi->dev)) {
put_device(&pi->dev);
- goto out_ida_free;
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+ return NULL;
}
pi->proto = pr;
pi->port = parport->base;
par_cb.private = pi;
- pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb,
- pi->dev.id);
+ pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
if (!pi->pardev)
goto out_module_put;
pi_connect(pi);
if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
- goto out_unreg_parport;
+ goto out_disconnect;
return pi;
-out_unreg_parport:
+out_disconnect:
pi_disconnect(pi);
+out_unreg_parport:
parport_unregister_device(pi->pardev);
if (pi->proto->release_proto)
pi->proto->release_proto(pi);
module_put(pi->proto->owner);
out_unreg_dev:
device_unregister(&pi->dev);
-out_ida_free:
- ida_free(&pata_parport_bus_dev_ids, pi->dev.id);
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}
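
After this rework every failure path funnels through the release callback: once pi->dev is initialized, put_device()/device_unregister() guarantee pata_parport_dev_release() runs exactly once, and it alone frees the IDA slot and the allocation. A reduced sketch of that ownership rule (names hypothetical):

	/* Sketch: once an object is handed to the refcounting core, cleanup
	 * (id free + kfree analogues) lives only in the release callback, so
	 * every error path can simply drop its reference. */
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int id;
		int refs;
		void (*release)(struct obj *);
	};

	static void obj_put(struct obj *o)
	{
		if (--o->refs == 0)
			o->release(o);          /* single point of cleanup */
	}

	static void obj_release(struct obj *o)
	{
		printf("freeing id %d\n", o->id);   /* ida_free() analogue */
		free(o);                            /* kfree() analogue */
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		o->id = 0;
		o->refs = 1;
		o->release = obj_release;
		obj_put(o);     /* error or teardown path: just drop the ref */
		return 0;
	}
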
pi_disconnect(pi);
pi_release(pi);
device_unregister(dev);
- ida_free(&pata_parport_bus_dev_ids, dev->id);
- /* pata_parport_dev_release will do kfree(pi) */
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
}
static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
}
pi_remove_one(dev);
+ put_device(dev);
mutex_unlock(&pi_mutex);
return count;
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
+ kfree(vc);
}
}
}
return 0;
}
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+ struct vc_map *vc = card->vcs[0];
+
+ free_scq(card, vc->scq);
+ kfree(vc);
+}
+
static int
idt77252_dev_open(struct idt77252_dev *card)
{
struct idt77252_dev *card = dev->dev_data;
u32 conf;
+ close_card_ubr0(card);
close_card_oam(card);
conf = SAR_CFG_RXPTH | /* enable receive path */
static void loop_handle_cmd(struct loop_cmd *cmd)
{
+ struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
+ struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
+ const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
- if (cmd->blkcg_css)
- kthread_associate_blkcg(cmd->blkcg_css);
- if (cmd->memcg_css)
+ if (cmd_blkcg_css)
+ kthread_associate_blkcg(cmd_blkcg_css);
+ if (cmd_memcg_css)
old_memcg = set_active_memcg(
- mem_cgroup_from_css(cmd->memcg_css));
+ mem_cgroup_from_css(cmd_memcg_css));
+ /*
+ * do_req_filebacked() may call blk_mq_complete_request() synchronously
+ * or asynchronously if using aio. Hence, do not touch 'cmd' after
+ * do_req_filebacked() has returned unless we are sure that 'cmd' has
+ * not yet been completed.
+ */
ret = do_req_filebacked(lo, rq);
- if (cmd->blkcg_css)
+ if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
- if (cmd->memcg_css) {
+ if (cmd_memcg_css) {
set_active_memcg(old_memcg);
- css_put(cmd->memcg_css);
+ css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
- if (!cmd->use_aio || ret) {
+ if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
- if (likely(!blk_should_fake_timeout(cmd->rq->q)))
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+ const struct blk_mq_queue_data *bd)
{
- struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct request *rq = bd->rq;
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_queue *nq = hctx->driver_data;
- sector_t nr_sectors = blk_rq_sectors(bd->rq);
- sector_t sector = blk_rq_pos(bd->rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ sector_t sector = blk_rq_pos(rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
- cmd->rq = bd->rq;
+ cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
- cmd->fake_timeout = should_timeout_request(bd->rq);
+ cmd->fake_timeout = should_timeout_request(rq) ||
+ blk_should_fake_timeout(rq->q);
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
- if (should_requeue_request(bd->rq)) {
+ if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
* driver driven requeue path
nq->requeue_selection++;
if (nq->requeue_selection & 1)
return BLK_STS_RESOURCE;
- else {
- blk_mq_requeue_request(bd->rq, true);
- return BLK_STS_OK;
- }
+ blk_mq_requeue_request(rq, true);
+ return BLK_STS_OK;
}
if (is_poll) {
spin_lock(&nq->poll_lock);
- list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ list_add_tail(&rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;
- return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+ return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}
static void cleanup_queue(struct nullb_queue *nq)
print_version();
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
}
}
-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+static void ubq_complete_io_cmd(struct ublk_io *io, int res,
+ unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0);
+ io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
-static inline void __ublk_rq_task_work(struct request *req)
+static inline void __ublk_rq_task_work(struct request *req,
+ unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
mapped_bytes >> 9;
}
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
+ unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+ __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ unsigned issue_flags = IO_URING_F_UNLOCKED;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+ IO_URING_F_UNLOCKED);
}
/* all io commands are canceled */
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
get_device(&ub->cdev_dev);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = add_disk(disk);
if (ret) {
/*
* Has to drop the reference since ->free_disk won't be
* called in case of add_disk failure.
*/
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
out_put_disk:
if (ret)
put_disk(disk);
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
#define ECDSA_HEADER_LEN 320
#define BTINTEL_PPAG_NAME "PPAG"
-#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB"
+
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+ u32 domain;
+ u32 mode;
+ acpi_status status;
+ struct hci_dev *hdev;
+};
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
- if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer,
- strlen(BTINTEL_PPAG_PREFIX))) {
+ len = strlen(string.pointer);
+ if (len < strlen(BTINTEL_PPAG_NAME)) {
kfree(string.pointer);
return AE_OK;
}
- len = strlen(string.pointer);
if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
kfree(string.pointer);
return AE_OK;
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ ppag->status = status;
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
kfree(buffer.pointer);
- bt_dev_warn(hdev, "Invalid object type: %d or package count: %d",
+ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
p->type, p->package.count);
+ ppag->status = AE_ERROR;
return AE_ERROR;
}
ppag->domain = (u32)p->package.elements[0].integer.value;
ppag->mode = (u32)p->package.elements[1].integer.value;
+ ppag->status = AE_OK;
kfree(buffer.pointer);
return AE_CTRL_TERMINATE;
}
static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
{
- acpi_status status;
struct btintel_ppag ppag;
struct sk_buff *skb;
struct btintel_loc_aware_reg ppag_cmd;
+ acpi_handle handle;
- /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+ /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
switch (ver->cnvr_top & 0xFFF) {
case 0x504: /* Hrp2 */
case 0x202: /* Jfp2 */
return;
}
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_info(hdev, "No support for BT device in ACPI firmware");
+ return;
+ }
+
memset(&ppag, 0, sizeof(ppag));
ppag.hdev = hdev;
- status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, NULL,
- btintel_ppag_callback, &ppag, NULL);
+ ppag.status = AE_NOT_FOUND;
+ acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
+ btintel_ppag_callback, &ppag, NULL);
- if (ACPI_FAILURE(status)) {
- /* Do not log warning message if ACPI entry is not found */
- if (status == AE_NOT_FOUND)
+ if (ACPI_FAILURE(ppag.status)) {
+ if (ppag.status == AE_NOT_FOUND) {
+ bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
return;
- bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status));
+ }
return;
}
if (ppag.domain != 0x12) {
- bt_dev_warn(hdev, "PPAG-BT Domain disabled");
+ bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth");
return;
}
/* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
if (!(ppag.mode & BIT(0))) {
- bt_dev_dbg(hdev, "PPAG disabled");
+ bt_dev_dbg(hdev, "PPAG-BT: disabled");
return;
}
__u8 preset[8];
} __packed;
-/* structure to store the PPAG data read from ACPI table */
-struct btintel_ppag {
- u32 domain;
- u32 mode;
- struct hci_dev *hdev;
-};
-
struct btintel_loc_aware_reg {
__le32 mcc;
__le32 sel;
return 0;
}
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ int ret;
+
+ ret = qca_set_bdaddr_rome(hdev, bdaddr);
+ if (ret)
+ return ret;
+
+ /* The firmware stops responding for a while after setting the bdaddr,
+ * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */
+ usleep_range(1000, 10000);
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
- hdev->set_bdaddr = qca_set_bdaddr_rome;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;
ret = hci_register_dev(hdev);
if (ret < 0)
BT_DBG("func %p", func);
+ cancel_work_sync(&data->work);
if (!data)
return;
hci_skb_expect(skb) -= len;
if (skb->len == HCI_ACL_HDR_SIZE) {
- __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
__le16 dlen = hci_acl_hdr(skb)->dlen;
- __u8 type;
/* Complete ACL header */
hci_skb_expect(skb) = __le16_to_cpu(dlen);
- /* Detect if ISO packet has been sent over bulk */
- if (hci_conn_num(data->hdev, ISO_LINK)) {
- type = hci_conn_lookup_type(data->hdev,
- hci_handle(handle));
- if (type == ISO_LINK)
- hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
- }
-
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
kfree_skb(skb);
skb = NULL;
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
struct device_node *child;
- int ret, have_child = 0;
struct weim_priv *priv;
void __iomem *base;
u32 reg;
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
- depends on REGMAP
+ select REGMAP
default MFD_HI655X_PMIC
help
This driver supports the hi655x PMIC clock. This
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 clock driver");
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
-MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:fsl-sai-clk");
f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
- return (u64)parent_rate * f / (r * od);
+ return div_u64((u64)parent_rate * f, r * od);
}
static const struct clk_ops k210_pll_ops = {
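
div_u64() is required because a plain '/' on a u64 dividend compiles to a libgcc helper (such as __udivdi3) on 32-bit targets, which the kernel does not link against. A user-space analogue of the PLL rate math, with illustrative register values:

	/* User-space analogue of the PLL rate computation above; the field
	 * values are made up. In the kernel, div_u64() replaces the plain
	 * 64-bit '/' so 32-bit builds avoid the libgcc division helper. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t parent_rate = 26000000;  /* 26 MHz reference, assumed */
		uint32_t r = 1, f = 59, od = 2;   /* divider/multiplier fields */

		uint64_t rate = parent_rate * f / ((uint64_t)r * od);

		printf("pll rate: %" PRIu64 " Hz\n", rate);  /* 767000000 Hz */
		return 0;
	}
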
module_exit(hi3559av100_crg_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver");
MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;
- list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+ list_for_each_entry_safe_reverse(pd_provider, it,
+ &psci_pd_providers, link) {
of_genpd_del_provider(pd_provider->node);
genpd = of_genpd_remove_last(pd_provider->node);
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/of.h>
#include "common.h"
/* Nothing to do. */
if (!phead) {
mutex_unlock(&scmi_requested_devices_mtx);
- return scmi_dev;
+ return NULL;
}
/* Walk the list of requested devices for protocol and create them */
hash_init(info->pending_xfers);
/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
- info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
- sizeof(long), GFP_KERNEL);
+ info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+ GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
+ bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
- bool coex =
- IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
-
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
goto clear_dev_req_notifier;
- /* Bail out anyway when coex enabled */
- return ret;
+ /* Bail out anyway when coex disabled. */
+ return 0;
}
/* Coex enabled, carry on in any case. */
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
+ if (coex)
+ return 0;
goto notification_exit;
}
"#mbox-cells", idx, NULL);
}
+static int mailbox_chan_validate(struct device *cdev)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ /* SCMI Tx and Rx shared mem areas have to be distinct */
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ return ret;
+}
+
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
resource_size_t size;
struct resource res;
+ ret = mailbox_chan_validate(cdev);
+ if (ret)
+ return ret;
+
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
}
}
+static bool __initdata fb_probed;
+
+void __init efi_earlycon_reprobe(void)
+{
+ if (fb_probed)
+ setup_earlycon("efifb");
+}
+
static int __init efi_earlycon_setup(struct earlycon_device *device,
const char *opt)
{
u16 xres, yres;
u32 i;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ fb_wb = opt && !strcmp(opt, "ram");
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
+ fb_probed = true;
return -ENODEV;
+ }
fb_base = screen_info.lfb_base;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- fb_wb = opt && !strcmp(opt, "ram");
-
si = &screen_info;
xres = si->lfb_width;
yres = si->lfb_height;
if (memblock_is_map_memory(screen_info.lfb_base))
memblock_mark_nomap(screen_info.lfb_base,
screen_info.lfb_size);
+
+ if (IS_ENABLED(CONFIG_EFI_EARLYCON))
+ efi_earlycon_reprobe();
}
}
$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
$(call if_changed,objcopy)
-targets += zboot-header.o vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
}
}
- if (image->image_base != _text)
+ if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ image->image_base = _text;
+ }
if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
return EFI_SUCCESS;
}
static bool system_needs_vamap(void)
{
- const u8 *type1_family = efi_get_smbios_string(1, family);
+ const struct efi_smbios_type4_record *record;
+ const u32 __aligned(1) *socid;
+ const u8 *version;
/*
* Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
- * SetVirtualAddressMap() has not been called prior.
+ * SetVirtualAddressMap() has not been called prior. Most Altra systems
+ * can be identified by the SMCCC soc ID, which is conveniently exposed
+ * via the type 4 SMBIOS records. Otherwise, test the processor version
+ * field. eMAG systems all appear to have the processor version field
+ * set to "eMAG".
*/
- if (!type1_family || (
- strcmp(type1_family, "eMAG") &&
- strcmp(type1_family, "Altra") &&
- strcmp(type1_family, "Altra Max")))
+ record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
+ if (!record)
return false;
- efi_warn("Working around broken SetVirtualAddressMap()\n");
- return true;
+ socid = (u32 *)record->processor_id;
+ switch (*socid & 0xffff000f) {
+ static char const altra[] = "Ampere(TM) Altra(TM) Processor";
+ static char const emag[] = "eMAG";
+
+ default:
+ version = efi_get_smbios_string(&record->header, 4,
+ processor_version);
+ if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
+ strncmp(version, emag, sizeof(emag) - 1)))
+ break;
+
+ fallthrough;
+
+ case 0x0a160001: // Altra
+ case 0x0a160002: // Altra Max
+ efi_warn("Working around broken SetVirtualAddressMap()\n");
+ return true;
+ }
+
+ return false;
}
efi_status_t check_platform_features(void)
#include "efistub.h"
+static unsigned long screen_info_offset;
+
+struct screen_info *alloc_screen_info(void)
+{
+ if (IS_ENABLED(CONFIG_ARM))
+ return __alloc_screen_info();
+ return (void *)&screen_info + screen_info_offset;
+}
+
/*
* EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
* LoongArch. This is the entrypoint that is described in the PE/COFF header
return status;
}
+ screen_info_offset = image_addr - (unsigned long)image->image_base;
+
status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
efi_free(image_size, image_addr);
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
-struct screen_info * __weak alloc_screen_info(void)
-{
- return &screen_info;
-}
-
void __weak free_screen_info(struct screen_info *si)
{
}
void efi_retrieve_tpm2_eventlog(void);
struct screen_info *alloc_screen_info(void);
+struct screen_info *__alloc_screen_info(void);
void free_screen_info(struct screen_info *si);
void efi_cache_sync_image(unsigned long image_base,
u16 handle;
};
+const struct efi_smbios_record *efi_get_smbios_record(u8 type);
+
struct efi_smbios_type1_record {
struct efi_smbios_record header;
u8 family;
};
-#define efi_get_smbios_string(__type, __name) ({ \
- int size = sizeof(struct efi_smbios_type ## __type ## _record); \
+struct efi_smbios_type4_record {
+ struct efi_smbios_record header;
+
+ u8 socket;
+ u8 processor_type;
+ u8 processor_family;
+ u8 processor_manufacturer;
+ u8 processor_id[8];
+ u8 processor_version;
+ u8 voltage;
+ u16 external_clock;
+ u16 max_speed;
+ u16 current_speed;
+ u8 status;
+ u8 processor_upgrade;
+ u16 l1_cache_handle;
+ u16 l2_cache_handle;
+ u16 l3_cache_handle;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 core_count;
+ u8 enabled_core_count;
+ u8 thread_count;
+ u16 processor_characteristics;
+ u16 processor_family2;
+ u16 core_count2;
+ u16 enabled_core_count2;
+ u16 thread_count2;
+ u16 thread_enabled;
+};
+
+#define efi_get_smbios_string(__record, __type, __name) ({ \
int off = offsetof(struct efi_smbios_type ## __type ## _record, \
__name); \
- __efi_get_smbios_string(__type, off, size); \
+ __efi_get_smbios_string((__record), __type, off); \
})
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset);
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
* to calculate the randomly chosen address, and allocate it directly
* using EFI_ALLOCATE_ADDRESS.
*/
+ status = EFI_OUT_OF_RESOURCES;
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
efi_memory_desc_t *md = (void *)map->map + map_offset;
efi_physical_addr_t target;
* early, but it only works if the EFI stub is part of the core kernel image
* itself. The zboot decompressor can only use the configuration table
* approach.
- *
- * In order to support both methods from the same build of the EFI stub
- * library, provide this dummy global definition of struct screen_info. If it
- * is required to satisfy a link dependency, it means we need to override the
- * __weak alloc and free methods with the ones below, and those will be pulled
- * in as well.
*/
-struct screen_info screen_info;
static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(void)
+struct screen_info *__alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
u8 minor_version;
};
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+const struct efi_smbios_record *efi_get_smbios_record(u8 type)
{
struct efi_smbios_record *record;
efi_smbios_protocol_t *smbios;
efi_status_t status;
u16 handle = 0xfffe;
- const u8 *strtable;
status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
(void **)&smbios) ?:
efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
if (status != EFI_SUCCESS)
return NULL;
+ return record;
+}
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset)
+{
+ const u8 *strtable;
+
+ if (!record)
+ return NULL;
- strtable = (u8 *)record + recsize;
+ strtable = (u8 *)record + record->length;
for (int i = 1; i < ((u8 *)record)[offset]; i++) {
int len = strlen(strtable);
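
The walk works because SMBIOS string references are 1-based indexes into a table of consecutive NUL-terminated strings that begins immediately after the formatted record, hence the record->length offset above. A standalone sketch of the lookup:

	/* Sketch (user space): resolve a 1-based SMBIOS string index. The
	 * string table starts at record + record_len; index 0 means "none". */
	#include <string.h>

	static const char *smbios_string(const unsigned char *record,
					 unsigned char record_len, int index)
	{
		const char *s = (const char *)record + record_len;

		if (index <= 0)
			return NULL;
		while (--index > 0) {
			if (!*s)                /* ran off the end of the table */
				return NULL;
			s += strlen(s) + 1;     /* step to the next string */
		}
		return *s ? s : NULL;
	}
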
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short 0
+ .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
// executable code loaded into memory to be safe for execution.
}
+struct screen_info *alloc_screen_info(void)
+{
+ return __alloc_screen_info();
+}
+
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
"IdeaPad Duet 3 10IGL5"),
},
},
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ },
+ },
{},
};
#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
+__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
screen_info.lfb_height = temp;
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
}
+}
+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
init_completion(&__scm->waitq_comp);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
if (irq != -ENXIO)
return irq;
if (disabled)
goto unlock_mutex;
+ sysfb_apply_efi_quirks();
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
goto unlock_mutex;
}
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
if (!pd)
return ERR_PTR(-ENOMEM);
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
if (ret)
}
/* Add new entry if not present */
- feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;
if (ACPI_FAILURE(status))
return;
+ if (acpi_quirk_skip_gpio_event_handlers())
+ return;
+
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
return true;
}
+
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+ return true;
+#endif
+}
+
/*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
*
}
/**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- if (amdgpu_sriov_vf(adev))
- return false;
-
- return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
-
-/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
#include <drm/drm_drv.h>
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
return pcie_aspm_enabled(adev->pdev);
}
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
- drm_kms_helper_poll_disable(dev);
-
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
- drm_kms_helper_poll_enable(dev);
-
amdgpu_ras_resume(adev);
if (adev->mode_info.num_crtc) {
struct drm_connector_list_iter iter;
int r;
+ drm_kms_helper_poll_disable(dev);
+
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_modeset_unlock_all(dev);
+ drm_kms_helper_poll_enable(dev);
+
return 0;
}
adev->in_s4 = false;
if (r)
return r;
- return amdgpu_asic_reset(adev);
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+ return 0;
}
static int amdgpu_pmops_thaw(struct device *dev)
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) {
+ struct amdgpu_job *job;
+
+ /* For non-scheduler bad job, i.e. failed ib test, we need to signal
+ * it right here or we won't be able to track them in fence_drv
+ * and they will remain unsignaled during sa_bo free.
+ */
+ job = container_of(old, struct amdgpu_job, hw_fence);
+ if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
}
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
+ /*
+ * Some Steam Deck BIOS versions are incompatible with the
+ * indirect SRAM mode, leading to amdgpu being unable to get
+ * properly probed (and even potentially crashing the kernel).
+ * Hence, check for these versions here - notice this is
+ * restricted to Vangogh (Deck's APU).
+ */
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.indirect_sram = false;
+ dev_info(adev->dev,
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ }
+ }
+
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
/* Indirect Reg Access enabled */
AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+ /* AV1 support mode */
+ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
};
enum AMDGIM_REG_ACCESS_FLAG {
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
- uint32_t reserved : 26;
+ uint32_t av1_support : 1;
+ uint32_t reserved : 25;
} flags;
uint32_t all;
};
break;
}
+ /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+ amdgpu_sriov_is_pp_one_vf(adev))
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
+
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
return false;
}
+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+ /*
+ * GFX soft reset will impact MES; MES must be resumed after a GFX soft reset.
+ */
+ return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
+
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
.wait_for_idle = gfx_v11_0_wait_for_idle,
.soft_reset = gfx_v11_0_soft_reset,
.check_soft_reset = gfx_v11_0_check_soft_reset,
+ .post_soft_reset = gfx_v11_0_post_soft_reset,
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,
static void nv_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (!(adev->flags & AMD_IS_APU) &&
amdgpu_virt_update_sriov_video_codec(adev,
sriov_sc_video_codecs_encode_array,
ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
- sriov_sc_video_codecs_decode_array_vcn1,
- ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ sriov_sc_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
}
}
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
+
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
- if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ if (amdgpu_sriov_vf(adev)) {
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+ }
} else {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ }
}
return 0;
default:
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+ }
+ }
return 0;
}
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
-
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
- return true;
-#endif
-}
-
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (adev->flags & AMD_IS_APU ||
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ mutex_unlock(&p->mutex);
+
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
+ bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
- if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ }
+ mutex_unlock(&p->mutex);
+ if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
svm_migrate_init(kfd->adev);
- if (kgd2kfd_resume_iommu(kfd))
+ if (kfd_resume_iommu(kfd))
goto device_iommu_error;
if (kfd_resume(kfd))
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
+ if (!kfd->init_complete)
+ return 0;
+
+ return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
+{
int err = 0;
err = kfd_iommu_resume(kfd);
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
uint64_t i, j;
int r;
- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
- prange->last);
+ pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+ prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
- r = svm_range_vram_node_new(adev, prange, true);
- if (r) {
- dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
- goto out;
- }
-
- amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
migrate->dst[i + 3] = 0;
}
#endif
-out:
+
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger)
+ uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
+ uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
+ r = svm_range_vram_node_new(adev, prange, true);
+ if (r) {
+ dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ return r;
+ }
+ ttm_res_offset = prange->offset << PAGE_SHIFT;
+
for (addr = start; addr < end;) {
unsigned long next;
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
+ ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
+ else
+ svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}
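
The bookkeeping above allocates the VRAM node once for the whole prange and then walks it per VMA, advancing ttm_res_offset by the size of each migrated chunk. A minimal, self-contained sketch of that accounting (addresses and sizes here are made up, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x10000, end = 0x1c000;
	unsigned long vma_ends[] = { 0x14000, 0x1c000 };	/* two VMAs */
	unsigned long ttm_res_offset = 0;	/* offset into the one VRAM node */
	int i = 0;

	for (unsigned long addr = start; addr < end; i++) {
		unsigned long next = vma_ends[i] < end ? vma_ends[i] : end;

		printf("migrate [%#lx,%#lx) at offset %#lx\n",
		       addr, next, ttm_res_offset);
		ttm_res_offset += next - addr;	/* advance into the node */
		addr = next;
	}
	return 0;
}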
static void kfd_exit(void)
{
+ kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
+ mmu_notifier_put(&p->mmu_notifier);
+}
+
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
return;
mutex_lock(&kfd_processes_mutex);
+ /*
+ * Do early return if table is empty.
+ *
+ * This could potentially happen if this function is called concurrently
+ * by mmu_notifier and by kfd_cleanup_processes.
+ */
+ if (hash_empty(kfd_processes_table)) {
+ mutex_unlock(&kfd_processes_mutex);
+ return;
+ }
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
- /* Indicate to other users that MM is no longer valid */
- p->mm = NULL;
-
- mmu_notifier_put(&p->mmu_notifier);
+ kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
+/*
+ * This code handles the case when the driver is being unloaded before all
+ * mm_structs are released. We need to safely free the kfd_process structures
+ * and avoid race conditions with mmu_notifier callbacks that might try to
+ * free them.
+ */
+void kfd_cleanup_processes(void)
+{
+ struct kfd_process *p;
+ struct hlist_node *p_temp;
+ unsigned int temp;
+ HLIST_HEAD(cleanup_list);
+
+ /*
+ * Move all remaining kfd_process entries from the process table to a
+ * temporary list for processing. Once done, the mmu_notifier release
+ * callback will not see the kfd_process in the table and will return
+ * early, avoiding double-free issues.
+ */
+ mutex_lock(&kfd_processes_mutex);
+ hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+ hash_del_rcu(&p->kfd_processes);
+ synchronize_srcu(&kfd_processes_srcu);
+ hlist_add_head(&p->kfd_processes, &cleanup_list);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+
+ hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+ kfd_process_notifier_release_internal(p);
+
+ /*
+ * Ensures that all outstanding free_notifier get called, triggering
+ * the release of the kfd_process struct.
+ */
+ mmu_notifier_synchronize();
+}
+
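
A minimal userspace analogue of the teardown pattern above: detach every entry from the shared table under the lock, then free outside it, so a concurrent release path that finds the table empty returns early. All names here are illustrative; this is not the kernel code.

#include <pthread.h>
#include <stdlib.h>

struct proc {
	int id;
	struct proc *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct proc *table;	/* stands in for kfd_processes_table */

/* Concurrent release path: return early once the table is drained. */
static void release_head(void)
{
	struct proc *p;

	pthread_mutex_lock(&table_lock);
	p = table;
	if (!p) {			/* already moved to a cleanup list */
		pthread_mutex_unlock(&table_lock);
		return;
	}
	table = p->next;
	pthread_mutex_unlock(&table_lock);
	free(p);
}

/* Module-exit style cleanup: move everything to a private list under the
 * lock, then free outside it; release_head() can no longer double-free. */
static void cleanup_all(void)
{
	struct proc *list, *p;

	pthread_mutex_lock(&table_lock);
	list = table;
	table = NULL;
	pthread_mutex_unlock(&table_lock);

	while ((p = list)) {
		list = p->next;
		free(p);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct proc *p = malloc(sizeof(*p));

		p->id = i;
		p->next = table;
		table = p;
	}
	release_head();
	cleanup_all();
	release_head();		/* safe: returns early on the empty table */
	return 0;
}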
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
return 0;
cleanup:
- if (dev->shared_resources.enable_mes)
- uninit_queue(*q);
+ uninit_queue(*q);
+ *q = NULL;
return retval;
}
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
- &dirty_rects[i], clips->x1,
- clips->y1, clips->x2 - clips->x1,
- clips->y2 - clips->y1,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
if (!aconnector->mst_root)
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
link->dp.mst_enabled = config->mst_enabled;
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- link->adjust.auth_delay = 0;
+ link->adjust.auth_delay = 2;
link->adjust.hdcp1.disable = 0;
conn_state = aconnector->base.state;
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
-
- dc_dmub_srv_p_state_delegate(dc,
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}
dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */
- switch (otg_inst)
- {
+ switch (dp_hpo_inst) {
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ } else if (dc_is_dp_signal(stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
bool subvp_in_use = false;
uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
+ bool vsr_odm_support = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+ res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
if (context->stream_count == 1 &&
context->stream_status[0].plane_count == 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
- dc->debug.enable_single_display_2to1_odm_policy) {
+ dc->debug.enable_single_display_2to1_odm_policy &&
+ !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.seamless_odm = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
enum dc_connection_type new_connection_type = dc_connection_none;
+ enum dc_connection_type pre_connection_type = link->type;
const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
}
if (!detect_dp(link, &sink_caps, reason)) {
+ link->type = pre_connection_type;
+
if (prev_sink)
dc_sink_release(prev_sink);
return false;
bool is_delegated_to_mst_top_mgr = false;
enum dc_connection_type pre_link_type = link->type;
+ DC_LOGGER_INIT(link->ctx->logger);
+
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
verify_link_capability(link, link->local_sink, reason);
+ DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+ link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
if (is_local_sink_detect_success && link->local_sink &&
dc_is_dp_signal(link->local_sink->sink_signal) &&
link->dpcd_caps.is_mst_capable)
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
- uint16_t spare2;
+ uint16_t FilterAlphaValue;
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
OverDriveTable_t *user_od_table =
(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTable_t user_od_table_bak;
int ret = 0;
- /*
- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
- * - either they already have the default OD settings got during cold bootup
- * - or they have some user customized OD settings which cannot be overwritten
- */
- if (smu->adev->in_suspend)
- return 0;
-
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
0, (void *)boot_od_table, false);
if (ret) {
sienna_cichlid_dump_od_table(smu, boot_od_table);
memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+ * but we have to preserve user defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+ user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+ user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+ }
return 0;
}
return ret;
}
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = table_context->overdrive_table;
+ OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ res = smu_v11_0_restore_user_od_settings(smu);
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+ return res;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
int res;
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_default_od_settings = sienna_cichlid_set_default_od_settings,
.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
- .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
lt->hdmi_port = of_drm_find_bridge(port_node);
if (!lt->hdmi_port) {
- dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
- ret = -ENODEV;
+ ret = -EPROBE_DEFER;
+ dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
goto err_free_host_node;
}
* the EDID then we'll just return 0.
*/
- base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, must be initialized by the caller
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
* hit shrinker in response to trying to get backing pages
* for this obj (i.e. while its lock is already held)
*/
- if (!dma_resv_trylock(obj->resv))
+ if (!dma_resv_trylock(obj->resv)) {
+ *remaining += obj->size >> PAGE_SHIFT;
goto tail;
+ }
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
int ret;
if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
vma->vm_private_data = NULL;
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ if (!ret)
+ drm_gem_object_put(obj);
- return dma_buf_mmap(obj->dma_buf, vma, 0);
+ return ret;
}
ret = drm_gem_shmem_get_pages(shmem);
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
- }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ }, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
- /* Non exact match to match all versions */
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */
*/
intel_vrr_send_push(new_crtc_state);
+ /*
+ * Seamless M/N update may need to update frame timings.
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+ intel_crtc_update_active_timings(new_crtc_state);
+
local_irq_enable();
if (intel_vgpu_active(dev_priv))
* only fields that are known to not cause problems are preserved. */
saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
}
}
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
- if (DISPLAY_VER(i915) < 13)
- return;
-
/*
- * Wa_16015201720:adl-p,dg2, mtl
+ * Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
* For pipe C and D clock gating needs to be disabled only
PIPEDMC_GATING_DIS, 0);
}
+static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_16015201720
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ */
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+ MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ if (DISPLAY_VER(i915) >= 14 && enable)
+ mtl_pipedmc_clock_gating_wa(i915);
+ else if (DISPLAY_VER(i915) == 13)
+ adlp_pipedmc_clock_gating_wa(i915, enable);
+}
+
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
return 0;
}
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return connector->port->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = connector->port->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
+ pipe_config->has_audio =
+ intel_dp_mst_has_audio(conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* for MST we always configure max link bw - the spec doesn't
bool prealloc = false;
void __iomem *vaddr;
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
info->fix.smem_len = vma->size;
}
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- goto out_unpin;
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
}
+
+ if (ret)
+ goto out_unpin;
+
info->screen_base = vaddr;
info->screen_size = vma->size;
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ }
+
/* Wa_22012278275:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments below for more information
*/
- u32 tmp, lines = 7;
-
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ u32 tmp;
- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
val |= tmp;
- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
val |= tmp;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- /*
- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
- * values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resolution mode needs to decrease
- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
- */
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= EDP_PSR2_FAST_WAKE(7);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
return true;
}
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+ * According to Bspec the fast wake time is 42us, but based on
+ * testing it is not enough -> use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+ /* According to Bspec lower limit should be set as 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
+
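
To make the conversion above concrete: assuming intel_usecs_to_scanlines() is the usual round-up division of the wake time by the line duration (pixel clock in kHz over horizontal total), the computation reduces to arithmetic like the following self-contained sketch. The mode numbers are examples only.

#include <stdio.h>

static unsigned int usecs_to_scanlines(unsigned int usecs,
				       unsigned int crtc_clock_khz,
				       unsigned int htotal)
{
	/* round up: a partial line still has to be waited out */
	return (usecs * (unsigned long long)crtc_clock_khz +
		1000ULL * htotal - 1) / (1000ULL * htotal);
}

int main(void)
{
	/* e.g. a 4k@60 mode: ~594 MHz pixel clock, htotal 4400 */
	unsigned int io = usecs_to_scanlines(42, 594000, 4400);
	unsigned int fast = usecs_to_scanlines(45, 594000, 4400);

	/* DISPLAY_VER >= 12 allows at most 12 lines; the Bspec floor is 7 */
	printf("io_wake_lines=%u fast_wake_lines=%u\n",
	       io < 7 ? 7 : io, fast < 7 ? 7 : fast);
	return 0;
}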
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
return false;
}
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, Unable to use long enough wake times\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_268500 = {
.clock = 268500,
.ref_control =
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_497750 = {
.clock = 497750,
.ref_control =
&dg2_hdmi_209800,
&dg2_hdmi_241500,
&dg2_hdmi_262750,
+ &dg2_hdmi_267300,
&dg2_hdmi_268500,
&dg2_hdmi_296703,
+ &dg2_hdmi_319890,
&dg2_hdmi_497750,
&dg2_hdmi_592000,
&dg2_hdmi_593407,
if (err)
goto err_gt;
- intel_uc_init_late(>->uc);
-
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;
+ intel_uc_init_late(>->uc);
+
intel_migrate_init(>->migrate, gt);
goto out_fw;
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
-#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
-static void mtl_media_busy(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(>->user_wakeref);
GT_TRACE(gt, "\n");
- /* Wa_14017073508: mtl */
- mtl_media_busy(gt);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- /* Wa_14017073508: mtl */
- mtl_media_idle(gt);
-
return 0;
}
}
DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
- perf_limit_reasons_clear, "%llu\n");
+ perf_limit_reasons_clear, "0x%llx\n");
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
return false;
}
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA) {
+ drm_notice(&i915->drm,
+ "Media RC6 disabled on A step\n");
+ return false;
+ }
+
return true;
}
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
* I915_MAX_SS_FUSE_BITS value below).
*/
-#define GEN_MAX_SS_PER_HSW_SLICE 6
+#define GEN_MAX_SS_PER_HSW_SLICE 8
/*
* Maximum number of 32-bit registers used by hardware to express the
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
list_del(&n->link);
ee->guc_capture_node = n;
ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
return;
}
}
static bool __guc_rc_supported(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
- /*
- * Wa_14017073508: mtl
- * Do not enable gucrc to avoid additional interrupts which
- * may disrupt pcode wa.
- */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- return false;
-
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(gt->i915) >= 12;
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}
static void debug_active_deactivate(struct i915_active *ref)
* we can use it to substitute for the pending idle-barrier
* request that we want to emit on the kernel_context.
*/
- __active_del_barrier(ref, node_from_active(active));
- return true;
+ return __active_del_barrier(ref, node_from_active(active));
}
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ u64 idx = i915_request_timeline(rq)->fence_context;
struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
if (err)
return err;
- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
- if (!active) {
- err = -ENOMEM;
- goto out;
- }
+ do {
+ active = active_instance(ref, idx);
+ if (!active) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ } while (unlikely(is_barrier(active)));
- if (replace_barrier(ref, active)) {
- RCU_INIT_POINTER(active->fence, NULL);
- atomic_dec(&ref->count);
- }
if (!__i915_active_fence_set(active, fence))
__i915_active_acquire(ref);
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
-#define DARBF_GATING_DIS (1 << 27)
-#define PWM2_GATING_DIS (1 << 14)
-#define PWM1_GATING_DIS (1 << 13)
+#define DARBF_GATING_DIS REG_BIT(27)
+#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
+#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define PWM2_GATING_DIS REG_BIT(14)
+#define PWM1_GATING_DIS REG_BIT(13)
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
#define TGL_VRH_GATING_DIS REG_BIT(31)
/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
#define PCODE_MBOX_DOMAIN_NONE 0x0
#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
-
-/* Wa_14017210380: mtl */
-#define PCODE_MBOX_GT_STATE 0x50
-/* sub-commands (param1) */
-#define PCODE_MBOX_GT_STATE_MEDIA_BUSY 0x1
-#define PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
-/* param2 */
-#define PCODE_MBOX_GT_STATE_DOMAIN_MEDIA 0x1
-
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_plane_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_overlay_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_crtc_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
drm_mode_config_reset(drm);
uninstall_irq:
free_irq(priv->vsync_irq, drm);
+unbind_all:
+ if (has_components)
+ component_unbind_all(drm->dev, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
ret = devm_regulator_get_enable_optional(dev, "hdmi");
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
return ret;
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
+ writel_relaxed(0x42020,
+ priv->io_base + _REG(VPP_DUMMY_DATA));
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
bool (*shrink)(struct drm_gem_object *obj);
bool cond;
unsigned long freed;
+ unsigned long remaining;
} stages[] = {
/* Stages of progressively more aggressive/expensive reclaim: */
{ &priv->lru.dontneed, purge, true },
};
long nr = sc->nr_to_scan;
unsigned long freed = 0;
+ unsigned long remaining = 0;
for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
if (!stages[i].cond)
continue;
stages[i].freed =
- drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ drm_gem_lru_scan(stages[i].lru, nr,
+ &stages[i].remaining,
+ stages[i].shrink);
nr -= stages[i].freed;
freed += stages[i].freed;
+ remaining += stages[i].remaining;
}
if (freed) {
stages[3].freed);
}
- return (freed > 0) ? freed : SHRINK_STOP;
+ return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
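
The pattern above only keeps reporting progress to the shrinker core while there is still skipped work to retry; a compact sketch of that decision with made-up stage numbers:

#include <stdio.h>

#define SHRINK_STOP (~0UL)

struct stage { unsigned long freed, remaining; };

static unsigned long scan(struct stage *stages, int n, long nr_to_scan)
{
	unsigned long freed = 0, remaining = 0;
	long nr = nr_to_scan;

	for (int i = 0; nr > 0 && i < n; i++) {
		/* each stage reports what it freed and what it skipped
		 * (e.g. objects whose resv lock could not be taken) */
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}
	/* only report progress while there is still work left to retry */
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

int main(void)
{
	struct stage s[] = { { 8, 4 }, { 0, 2 } };

	printf("%lu\n", scan(s, 2, 32));	/* 8: freed, with work left */
	return 0;
}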
#ifdef CONFIG_DEBUG_FS
NULL,
};
unsigned idx, unmapped = 0;
+ unsigned long remaining = 0;
for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
+ &remaining,
vmap_shrink);
}
if (pm_runtime_active(pfdev->dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
/* drm_vblank_init calls kcalloc, which can fail */
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
sun4i_framebuffer_init(drm);
finish_poll:
drm_kms_helper_poll_fini(drm);
+unbind_all:
+ component_unbind_all(dev, NULL);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
-
return 0;
}
struct ttm_buffer_object *bo = res->bo;
uint32_t num_pages;
- if (!bo)
+ if (!bo || bo->resource != res)
continue;
num_pages = PFN_UP(bo->base.size);
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
- int syncpt_irq;
int err;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
}
host->syncpt_irq = platform_get_irq(pdev, 0);
- if (syncpt_irq < 0)
- return syncpt_irq;
+ if (host->syncpt_irq < 0)
+ return host->syncpt_irq;
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
val = (temp - val) / 1000;
if (sattr->index != 1) {
- data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+ data->temp[HYSTERSIS][sattr->index] &= 0x0F;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
} else {
- data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+ data->temp[HYSTERSIS][sattr->index] &= 0xF0;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
}
val = data->enh_acoustics[0] & 0xf;
break;
case 1:
- val = (data->enh_acoustics[1] >> 4) & 0xf;
+ val = data->enh_acoustics[1] & 0xf;
break;
case 2:
default:
- val = data->enh_acoustics[1] & 0xf;
+ val = (data->enh_acoustics[1] >> 4) & 0xf;
break;
}
struct hwmon_device *hwdev;
const char *label;
struct device *hdev;
+ struct device *tdev = dev;
int i, err, id;
/* Complain about invalid characters in hwmon name attribute */
hwdev->name = name;
hdev->class = &hwmon_class;
hdev->parent = dev;
- hdev->of_node = dev ? dev->of_node : NULL;
+ while (tdev && !tdev->of_node)
+ tdev = tdev->parent;
+ hdev->of_node = tdev ? tdev->of_node : NULL;
hwdev->chip = chip;
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
INIT_LIST_HEAD(&hwdev->tzdata);
- if (dev && dev->of_node && chip && chip->ops->read &&
+ if (hdev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
err = hwmon_thermal_register_sensors(hdev);
return ret;
} else if (val > INA3221_CHANNEL3) {
dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
- return ret;
+ return -EINVAL;
}
input = &ina->inputs[val];
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
#define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT)
+#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
+ FEAT_10_9MV_ADC))
struct it87_sio_data {
int sioaddr;
"Detected broken BIOS defaults, disabling PWM interface\n");
/* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
+ if (has_scaling(data)) {
if (sio_data->internal & BIT(0))
data->in_scaled |= BIT(3); /* in3 is AVCC */
if (sio_data->internal & BIT(1))
st->gc.label = name;
st->gc.parent = &st->client->dev;
st->gc.owner = THIS_MODULE;
+ st->gc.can_sleep = true;
st->gc.base = -1;
st->gc.names = st->gpio_names;
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
};
+static const struct cpu_info cpu_skx = {
+ .reg = &resolved_cores_reg_hsx,
+ .min_peci_revision = 0x33,
+ .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
static const struct cpu_info cpu_icx = {
.reg = &resolved_cores_reg_icx,
.min_peci_revision = 0x40,
},
{
.name = "peci_cpu.cputemp.skx",
- .driver_data = (kernel_ulong_t)&cpu_hsx,
+ .driver_data = (kernel_ulong_t)&cpu_skx,
},
{
.name = "peci_cpu.cputemp.icx",
data->gc.label = name;
data->gc.parent = &data->client->dev;
data->gc.owner = THIS_MODULE;
+ data->gc.can_sleep = true;
data->gc.base = -1;
data->gc.names = data->gpio_names;
data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/i2c.h>
#include <linux/pmbus.h>
#include <linux/gpio/driver.h>
+#include <linux/timekeeping.h>
#include "pmbus.h"
enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
struct gpio_chip gpio;
#endif
struct dentry *debugfs;
+ ktime_t write_time;
};
#define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
u8 index;
};
+/*
+ * It has been observed that the UCD90320 randomly fails register accesses
+ * when another access follows right on the back of a register write. To
+ * mitigate this, make sure that there is a minimum delay between a write
+ * access and the following access. The 250us is based on experimental data:
+ * at a delay of 200us the issue seems to go away; add a bit of extra margin
+ * to allow for system-to-system differences.
+ */
+#define UCD90320_WAIT_DELAY_US 250
+
+static inline void ucd90320_wait(const struct ucd9000_data *data)
+{
+ s64 delta = ktime_us_delta(ktime_get(), data->write_time);
+
+ if (delta < UCD90320_WAIT_DELAY_US)
+ udelay(UCD90320_WAIT_DELAY_US - delta);
+}
+
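
A self-contained userspace analogue of the wait above: stamp the time after every write and sleep out whatever is left of the window before the next access. The clock source and helpers are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define WAIT_DELAY_US 250	/* same experimentally chosen window */

static int64_t last_write_us;

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void access_wait(void)
{
	int64_t delta = now_us() - last_write_us;

	if (delta < WAIT_DELAY_US)
		usleep(WAIT_DELAY_US - delta);
}

static void do_write(void)
{
	access_wait();
	/* ... the actual bus write would go here ... */
	last_write_us = now_us();	/* stamp after the write completes */
}

int main(void)
{
	do_write();
	do_write();		/* second write is held off by ~250us */
	printf("done\n");
	return 0;
}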
+static int ucd90320_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+
+ ucd90320_wait(data);
+ return pmbus_read_word_data(client, page, phase, reg);
+}
+
+static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+
+ ucd90320_wait(data);
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static int ucd90320_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+ int ret;
+
+ ucd90320_wait(data);
+ ret = pmbus_write_word_data(client, page, reg, word);
+ data->write_time = ktime_get();
+
+ return ret;
+}
+
+static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+ int ret;
+
+ ucd90320_wait(data);
+ ret = pmbus_write_byte(client, page, value);
+ data->write_time = ktime_get();
+
+ return ret;
+}
+
static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
{
int fan_config = 0;
info->read_byte_data = ucd9000_read_byte_data;
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
| PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
+ } else if (mid->driver_data == ucd90320) {
+ info->read_byte_data = ucd90320_read_byte_data;
+ info->read_word_data = ucd90320_read_word_data;
+ info->write_byte = ucd90320_write_byte;
+ info->write_word_data = ucd90320_write_word_data;
}
ucd9000_probe_gpio(client, mid, data);
static struct i2c_driver tmp51x_driver = {
.driver = {
.name = "tmp51x",
- .of_match_table = of_match_ptr(tmp51x_of_match),
+ .of_match_table = tmp51x_of_match,
},
.probe_new = tmp51x_probe,
.id_table = tmp51x_id,
ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)ioremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
+ ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
+ ctx->comm_base_addr,
+ pcc_chan->shmem_size);
else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
+ ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
+ ctx->comm_base_addr,
+ pcc_chan->shmem_size,
+ MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;
{
struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+ cancel_work_sync(&ctx->workq);
hwmon_device_unregister(ctx->hwmon_dev);
kfifo_free(&ctx->async_msg_fifo);
if (acpi_disabled)
max_write == 0)
break;
}
+
+ /*
+ * Disable the TX_EMPTY interrupt after finishing all the messages to
+ * avoid overwhelming the CPU.
+ */
+ if (ctlr->msg_tx_idx == ctlr->msg_num)
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
}
static irqreturn_t hisi_i2c_irq(int irq, void *context)
hisi_i2c_read_rx_fifo(ctlr);
out:
- if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+ /*
+ * Only use TRANS_CPLT to indicate completion. In error cases we'll
+ * get two interrupts, INT_ERR first then TRANS_CPLT.
+ */
+ if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
complete(ctlr->completion);
if (num == 1 && msgs[0].len == 0)
goto stop;
+ lpi2c_imx->rx_buf = NULL;
+ lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int enabled;
unsigned int temp;
+ enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
lpi2c_imx_intctrl(lpi2c_imx, 0);
temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ temp &= enabled;
if (temp & MSR_RDF)
lpi2c_imx_read_rxfifo(lpi2c_imx);
}
static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msg, uint32_t flags)
+ struct i2c_msg *msg, u8 *buf, uint32_t flags)
{
struct dma_async_tx_descriptor *desc;
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
}
/* Queue the DMA data transfer. */
- sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
+ sg_init_one(&i2c->sg_io[1], buf, msg->len);
dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
DMA_DEV_TO_MEM,
/* Queue the DMA data transfer. */
sg_init_table(i2c->sg_io, 2);
sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
- sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
+ sg_set_buf(&i2c->sg_io[1], buf, msg->len);
dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
DMA_MEM_TO_DEV,
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
int ret;
int flags;
+ u8 *dma_buf;
int use_pio = 0;
unsigned long time_left;
if (ret && (ret != -ENXIO))
mxs_i2c_reset(i2c);
} else {
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
+ if (!dma_buf)
+ return -ENOMEM;
+
reinit_completion(&i2c->cmd_complete);
- ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
- if (ret)
+ ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags);
+ if (ret) {
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
return ret;
+ }
time_left = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
if (!time_left)
goto timeout;
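
The bracketing added above follows the stock i2c-core helpers: i2c_get_dma_safe_msg_buf() hands back a buffer that is safe to map for DMA (bouncing the message data if needed), and i2c_put_dma_safe_msg_buf() releases it, copying read data back only when the transfer actually happened. A schematic kernel-context sketch; the transfer helper is hypothetical.

#include <linux/i2c.h>

/* Hypothetical transfer routine; only the get/put bracketing is the point. */
static int xfer_one_dma(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *dma_buf;
	int ret;

	/* threshold 1: borrow a bounce buffer even for short messages */
	dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
	if (!dma_buf)
		return -ENOMEM;

	ret = start_dma_and_wait(adap, msg, dma_buf);	/* hypothetical */

	/* xferred=true copies read data back into msg->buf before freeing */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);

	return ret;
}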
u32 msg[3];
int rc;
+ if (writelen > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);
mutex_unlock(&icc_lock);
+ if (!node)
+ return;
+
+ kfree(node->links);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
EXPORT_SYMBOL_GPL(icc_nodes_remove);
/**
- * icc_provider_add() - add a new interconnect provider
- * @provider: the interconnect provider that will be added into topology
+ * icc_provider_init() - initialize a new interconnect provider
+ * @provider: the interconnect provider to initialize
+ *
+ * Must be called before adding nodes to the provider.
+ */
+void icc_provider_init(struct icc_provider *provider)
+{
+ WARN_ON(!provider->set);
+
+ INIT_LIST_HEAD(&provider->nodes);
+}
+EXPORT_SYMBOL_GPL(icc_provider_init);
+
+/**
+ * icc_provider_register() - register a new interconnect provider
+ * @provider: the interconnect provider to register
*
* Return: 0 on success, or an error code otherwise
*/
-int icc_provider_add(struct icc_provider *provider)
+int icc_provider_register(struct icc_provider *provider)
{
- if (WARN_ON(!provider->set))
- return -EINVAL;
if (WARN_ON(!provider->xlate && !provider->xlate_extended))
return -EINVAL;
mutex_lock(&icc_lock);
-
- INIT_LIST_HEAD(&provider->nodes);
list_add_tail(&provider->provider_list, &icc_providers);
-
mutex_unlock(&icc_lock);
- dev_dbg(provider->dev, "interconnect provider added to topology\n");
+ dev_dbg(provider->dev, "interconnect provider registered\n");
return 0;
}
-EXPORT_SYMBOL_GPL(icc_provider_add);
+EXPORT_SYMBOL_GPL(icc_provider_register);
/**
- * icc_provider_del() - delete previously added interconnect provider
- * @provider: the interconnect provider that will be removed from topology
+ * icc_provider_deregister() - deregister an interconnect provider
+ * @provider: the interconnect provider to deregister
*/
-void icc_provider_del(struct icc_provider *provider)
+void icc_provider_deregister(struct icc_provider *provider)
{
mutex_lock(&icc_lock);
- if (provider->users) {
- pr_warn("interconnect provider still has %d users\n",
- provider->users);
- mutex_unlock(&icc_lock);
- return;
- }
-
- if (!list_empty(&provider->nodes)) {
- pr_warn("interconnect provider still has nodes\n");
- mutex_unlock(&icc_lock);
- return;
- }
+ WARN_ON(provider->users);
list_del(&provider->provider_list);
mutex_unlock(&icc_lock);
}
+EXPORT_SYMBOL_GPL(icc_provider_deregister);
+
+int icc_provider_add(struct icc_provider *provider)
+{
+ icc_provider_init(provider);
+
+ return icc_provider_register(provider);
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+void icc_provider_del(struct icc_provider *provider)
+{
+ WARN_ON(!list_empty(&provider->nodes));
+
+ icc_provider_deregister(provider);
+}
EXPORT_SYMBOL_GPL(icc_provider_del);
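
Taken together, the split above fixes the registration order: initialize the provider first, create and link its nodes, and only then make it visible via icc_provider_register(); teardown runs in reverse. A schematic sketch using only the calls introduced in this patch; the node-creation helper is hypothetical.

#include <linux/interconnect-provider.h>

static int example_register(struct icc_provider *provider)
{
	int ret;

	icc_provider_init(provider);		/* before creating any nodes */

	ret = create_and_link_nodes(provider);	/* hypothetical: icc_node_create()
						 * + icc_node_add() per node */
	if (ret)
		goto err_remove_nodes;

	ret = icc_provider_register(provider);	/* last: provider goes live */
	if (ret)
		goto err_remove_nodes;

	return 0;

err_remove_nodes:
	icc_nodes_remove(provider);
	return ret;
}

/* Teardown mirrors this in reverse: icc_provider_deregister() first,
 * then icc_nodes_remove(). */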
static const struct of_device_id __maybe_unused ignore_list[] = {
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
provider->dev = dev->parent;
+
+ icc_provider_init(provider);
+
platform_set_drvdata(pdev, imx_provider);
if (settings) {
}
}
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+ if (ret)
return ret;
- }
- ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+ ret = icc_provider_register(provider);
if (ret)
- goto provider_del;
+ goto err_unregister_nodes;
return 0;
-provider_del:
- icc_provider_del(provider);
+err_unregister_nodes:
+ imx_icc_unregister_nodes(&imx_provider->provider);
return ret;
}
EXPORT_SYMBOL_GPL(imx_icc_register);
{
struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
+ icc_provider_deregister(&imx_provider->provider);
imx_icc_unregister_nodes(&imx_provider->provider);
-
- icc_provider_del(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
}
provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err;
+ goto err_remove_nodes;
}
node->name = qnodes[i]->name;
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
- if (of_get_child_count(dev->of_node) > 0)
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (of_get_child_count(dev->of_node) > 0) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_deregister_provider;
+ }
return 0;
-err:
+
+err_deregister_provider:
+ icc_provider_deregister(provider);
+err_remove_nodes:
icc_nodes_remove(provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(provider);
return ret;
}
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(&qp->provider);
return 0;
}
provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
+ icc_provider_init(provider);
+
qp->dev = dev;
qp->bcms = desc->bcms;
qp->num_bcms = desc->num_bcms;
if (IS_ERR(qp->voter))
return PTR_ERR(qp->voter);
- ret = icc_provider_add(provider);
- if (ret)
- return ret;
-
for (i = 0; i < qp->num_bcms; i++)
qcom_icc_bcm_init(qp->bcms[i], dev);
node = icc_node_create(qn->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err;
+ goto err_remove_nodes;
}
node->name = qn->name;
}
data->num_nodes = num_nodes;
+
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
- if (of_get_child_count(dev->of_node) > 0)
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (of_get_child_count(dev->of_node) > 0) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_deregister_provider;
+ }
return 0;
-err:
+
+err_deregister_provider:
+ icc_provider_deregister(provider);
+err_remove_nodes:
icc_nodes_remove(provider);
- icc_provider_del(provider);
+
return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
return 0;
}
return ret;
provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = msm8974_icc_set;
provider->aggregate = icc_std_aggregate;
provider->data = data;
provider->get_bw = msm8974_get_bw;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- goto err_disable_clks;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err_del_icc;
+ goto err_remove_nodes;
}
node->name = qnodes[i]->name;
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
return 0;
-err_del_icc:
+err_remove_nodes:
icc_nodes_remove(provider);
- icc_provider_del(provider);
-
-err_disable_clks:
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
return ret;
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(&qp->provider);
return 0;
}
{
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
return 0;
}
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
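The allocation fix above matters because struct icc_onecell_data ends in a flexible array, and the old devm_kcalloc() sized the buffer from sizeof(*node) rather than from the header plus its trailing nodes[] entries. struct_size() from <linux/overflow.h> computes exactly that, saturating on overflow; a short illustration (the struct layout is the one from include/linux/interconnect-provider.h):

    /* struct icc_onecell_data {
     *         unsigned int num_nodes;
     *         struct icc_node *nodes[];   // flexible array member
     * };
     *
     * struct_size(data, nodes, num_nodes)
     *   == sizeof(*data) + num_nodes * sizeof(data->nodes[0]),
     * saturating to SIZE_MAX if the multiply or add would overflow,
     * which makes the subsequent allocation fail cleanly.
     */
    data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
                        GFP_KERNEL);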
provider->set = qcom_osm_l3_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
- INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, qp);
return 0;
err:
icc_nodes_remove(provider);
- icc_provider_del(provider);
return ret;
}
.name = "mas_snoc_bimc_nrt",
.buswidth = 16,
.qos.ap_owned = true,
- .qos.qos_port = 2,
+ .qos.qos_port = 3,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
- .mas_rpm_id = 163,
+ .mas_rpm_id = 164,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
.links = mas_snoc_bimc_nrt_links,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node * const *qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
-
- return 0;
-}
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8450-aggre1-noc",
.data = &sm8450_aggre1_noc},
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8450",
.of_match_table = qnoc_of_match,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node * const *qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
-
- return 0;
-}
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8550-aggre1-noc",
.data = &sm8550_aggre1_noc},
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
static int exynos_generic_icc_remove(struct platform_device *pdev)
{
struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
- struct icc_node *parent_node, *node = priv->node;
-
- parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
- if (parent_node && !IS_ERR(parent_node))
- icc_link_destroy(node, parent_node);
+ icc_provider_deregister(&priv->provider);
icc_nodes_remove(&priv->provider);
- icc_provider_del(&priv->provider);
return 0;
}
provider->inter_set = true;
provider->data = priv;
- ret = icc_provider_add(provider);
- if (ret < 0)
- return ret;
+ icc_provider_init(provider);
icc_node = icc_node_create(pdev->id);
- if (IS_ERR(icc_node)) {
- ret = PTR_ERR(icc_node);
- goto err_prov_del;
- }
+ if (IS_ERR(icc_node))
+ return PTR_ERR(icc_node);
priv->node = icc_node;
icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
&priv->bus_clk_ratio))
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
+ icc_node->data = priv;
+ icc_node_add(icc_node, provider);
+
/*
* Register a PM QoS request for the parent (devfreq) device.
*/
if (ret < 0)
goto err_node_del;
- icc_node->data = priv;
- icc_node_add(icc_node, provider);
-
icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
if (IS_ERR(icc_parent_node)) {
ret = PTR_ERR(icc_parent_node);
goto err_pmqos_del;
}
+ ret = icc_provider_register(provider);
+ if (ret < 0)
+ goto err_pmqos_del;
+
return 0;
err_pmqos_del:
dev_pm_qos_remove_request(&priv->qos_req);
err_node_del:
icc_nodes_remove(provider);
-err_prov_del:
- icc_provider_del(provider);
+
return ret;
}
config BLK_DEV_MD
tristate "RAID support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
+ # BLOCK_LEGACY_AUTOLOAD requirement should be removed
+ # after relevant mdadm enhancements - to make "names=yes"
+ # the default - are widely available.
+ select BLOCK_LEGACY_AUTOLOAD
help
This driver lets you combine several hard disk partitions into one
logical block device. This can be used to simply append one
struct crypt_config *cc;
struct bio *base_bio;
u8 *integrity_metadata;
- bool integrity_metadata_from_pool;
+ bool integrity_metadata_from_pool:1;
+ bool in_tasklet:1;
+
struct work_struct work;
struct tasklet_struct tasklet;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
+ io->in_tasklet = false;
atomic_set(&io->io_pending, 0);
}
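The two flags in struct dm_crypt_io become single-bit bitfields, which lets the new in_tasklet flag ride along without growing the structure. A self-contained illustration of the space behaviour (plain C, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct two_bools { bool a; bool b; };          /* typically 2 bytes */
    struct two_bits  { bool a:1; bool b:1; };      /* typically 1 byte  */

    int main(void)
    {
        struct two_bits f = { .a = true };

        printf("plain: %zu, bitfield: %zu, a=%d b=%d\n",
               sizeof(struct two_bools), sizeof(struct two_bits),
               f.a, f.b);
        return 0;
    }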
* our tasklet. In this case we need to delay bio_endio()
* execution to after the tasklet is done and dequeued.
*/
- if (tasklet_trylock(&io->tasklet)) {
- tasklet_unlock(&io->tasklet);
- bio_endio(base_bio);
+ if (io->in_tasklet) {
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
return;
}
- INIT_WORK(&io->work, kcryptd_io_bio_endio);
- queue_work(cc->io_queue, &io->work);
+ bio_endio(base_bio);
}
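The completion path stops probing tasklet state with tasklet_trylock()/tasklet_unlock() and instead consults the explicit in_tasklet flag set where the tasklet is scheduled (see the hunk below): if the I/O is completing from inside the tasklet, bio_endio() is deferred to the workqueue; otherwise it runs directly. A generic sketch of that deferral pattern, with hypothetical names:

    /* Sketch (kernel C): complete in process context when the caller
     * may be running in atomic (tasklet) context. */
    static void foo_endio_work(struct work_struct *work)
    {
        struct foo_io *io = container_of(work, struct foo_io, work);

        bio_endio(io->base_bio);            /* safe: workqueue context */
    }

    static void foo_complete(struct foo_io *io)
    {
        if (io->in_tasklet) {               /* set before tasklet_schedule() */
            INIT_WORK(&io->work, foo_endio_work);
            queue_work(io->wq, &io->work);
            return;
        }
        bio_endio(io->base_bio);
    }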
/*
io = crypt_io_from_node(rb_first(&write_tree));
rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
+ cond_resched();
} while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}
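The cond_resched() added above keeps a potentially long drain of the write tree from monopolizing the CPU; without it, a loop like this can trigger soft-lockup warnings under voluntary preemption. The general shape:

    /* Sketch: yield periodically in a long process-context loop */
    do {
        process_one_item();
        cond_resched();     /* give the scheduler a chance */
    } while (more_items());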
* it is being executed with irqs disabled.
*/
if (in_hardirq() || irqs_disabled()) {
+ io->in_tasklet = true;
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
atomic_read(&shared->in_flight[WRITE]);
}
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
{
int cpu;
struct dm_stats_last_position *last;
INIT_LIST_HEAD(&stats->list);
stats->precise_timestamps = false;
stats->last = alloc_percpu(struct dm_stats_last_position);
+ if (!stats->last)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
last->last_sector = (sector_t)ULLONG_MAX;
last->last_rw = UINT_MAX;
}
+
+ return 0;
}
void dm_stats_cleanup(struct dm_stats *stats)
unsigned long long duration_ns;
};
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
/*
* Only need to enable discards if the pool should pass
goto bad;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
ti->flush_supported = true;
ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
sectors = io->sectors;
if (!end)
- bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
- start_time);
+ bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
else
- bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+ bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
+ start_time);
if (static_branch_unlikely(&stats_enabled) &&
unlikely(dm_stats_used(&md->stats))) {
if (!md->pending_io)
goto bad;
- dm_stats_init(&md->stats);
+ r = dm_stats_init(&md->stats);
+ if (r < 0)
+ goto bad;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
+ if (slot < 0)
+ /* overflow */
+ return -ENOSPC;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
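The new check catches values that kstrtouint() parsed successfully but that do not fit a signed int: slot is an int written through an (unsigned int *) cast, so anything above INT_MAX wraps negative. A self-contained demonstration of the wrap (plain C):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int slot;
        /* e.g. the string "2147483648" parsed as an unsigned int */
        unsigned int raw = (unsigned int)INT_MAX + 1u;

        slot = (int)raw;    /* what writing through (unsigned int *)&slot does */
        if (slot < 0)
            puts("overflow: reject with -ENOSPC");
        return 0;
    }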
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ percpu_ref_exit(&mddev->writes_pending);
+ percpu_ref_exit(&mddev->active_io);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
}
void md_stop(struct mddev *mddev)
*/
__md_stop_writes(mddev);
__md_stop(mddev);
- percpu_ref_exit(&mddev->writes_pending);
- percpu_ref_exit(&mddev->active_io);
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
}
EXPORT_SYMBOL_GPL(md_stop);
{
struct mddev *mddev = disk->private_data;
- percpu_ref_exit(&mddev->writes_pending);
- percpu_ref_exit(&mddev->active_io);
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
-
mddev_free(mddev);
}
do {
if (code == m5mols_default_ffmt[type].code)
return type;
- } while (type++ != SIZE_DEFAULT_FFMT);
+ } while (++type != SIZE_DEFAULT_FFMT);
return 0;
}
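The m5mols fix swaps a post-increment for a pre-increment in the loop condition: with type++ the old value is compared, so the body runs one extra time and reads m5mols_default_ffmt[SIZE_DEFAULT_FFMT], one past the end of the array. A standalone demonstration of the off-by-one (only the index is printed, to keep the demo well defined):

    #include <stdio.h>

    #define N 2     /* stands in for SIZE_DEFAULT_FFMT */

    int main(void)
    {
        unsigned int i;

        i = 0;
        do {
            printf("post-increment visits %u\n", i);    /* 0, 1, 2: the pass
                                                          * with i == N would
                                                          * index out of bounds */
        } while (i++ != N);

        i = 0;
        do {
            printf("pre-increment visits %u\n", i);     /* 0, 1 only */
        } while (++i != N);

        return 0;
    }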
mc->provider.aggregate = mc->soc->icc_ops->aggregate;
mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
- err = icc_provider_add(&mc->provider);
- if (err)
- return err;
+ icc_provider_init(&mc->provider);
/* create Memory Controller node */
node = icc_node_create(TEGRA_ICC_MC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto del_provider;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "Memory Controller";
icc_node_add(node, &mc->provider);
goto remove_nodes;
}
+ err = icc_provider_register(&mc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&mc->provider);
-del_provider:
- icc_provider_del(&mc->provider);
return err;
}
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
struct dw_mci *host = slot->host;
struct starfive_priv *priv = host->priv;
int rise_point = -1, fall_point = -1;
- int err, prev_err;
+ int err, prev_err = 0;
int i;
bool found = 0;
u32 regval;
MAX_POWER_ON_TIMEOUT, false, host, val,
reg);
if (ret)
- dev_warn(mmc_dev(host->mmc), "Power on failed\n");
+ dev_info(mmc_dev(host->mmc), "Power on failed\n");
}
}
slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
} while (0)
+/* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
+ * have to restore the IFF_MASTER flag and only restore IFF_SLAVE if it was
+ * set.
+ */
+static void bond_ether_setup(struct net_device *bond_dev)
+{
+ unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
+
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER | slave_flag;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else {
- ether_setup(bond_dev);
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- }
+ else
+ bond_ether_setup(bond_dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
- ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER;
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ bond_ether_setup(bond_dev);
}
}
if (priv->can.clock.freq > 8000000)
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ if (of_property_read_bool(np, "bosch,divide-memory-clock"))
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
priv->bus_config |= BUSCFG_CBY;
- if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
- if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx1-input"))
priv->bus_config |= BUSCFG_DR1;
- if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-tx1-output"))
priv->bus_config |= BUSCFG_DT1;
- if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
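These c_can conversions (and the matching macb, dm9000, fec, gianfar and ibm/emac hunks later in this series) are mechanical: for a boolean devicetree property, of_get_property(np, prop, NULL) != NULL and of_property_read_bool(np, prop) test the same thing, but the helper names the intent and returns bool. A sketch with a hypothetical property name:

    /* old style: presence check via the raw accessor */
    if (of_get_property(np, "vendor,example-flag", NULL))
        priv->flag = true;

    /* preferred: dedicated boolean helper, same semantics */
    if (of_property_read_bool(np, "vendor,example-flag"))
        priv->flag = true;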
if (of_property_read_u32(of_port, "reg", ®))
continue;
- if (reg < B53_CPU_PORT)
+ if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}
[S_BROADCAST_CTRL] = 0x06,
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
- [P_XMII_CTRL_1] = 0x56,
+ [P_XMII_CTRL_1] = 0x06,
};
static const u32 ksz8795_masks[] = {
/* Set up switch core clock for MT7530 */
static void mt7530_pll_setup(struct mt7530_priv *priv)
{
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
/* Disable PLL */
core_write(priv, CORE_GSWPLL_GRP1, 0);
RG_GSWPLL_EN_PRE |
RG_GSWPLL_POSDIV_200M(2) |
RG_GSWPLL_FBKDIV_200M(32));
+
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}
-/* Setup TX circuit including relevant PAD and driving */
+/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i, xtal;
+ u32 ncpo1, ssc_delta, trgint, xtal;
xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
- /* PLL frequency: 125MHz */
- ncpo1 = 0x0c80;
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 150MHz: 1.2GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
return -EINVAL;
}
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
-
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));
- /* Lower Tx Driving for TRGMII path */
- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
+ if (trgint) {
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+ RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7,
+ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ }
- /* Disable MT7530 core and TRGMII Tx clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-
- /* Enable MT7530 core and TRGMII Tx clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- if (!trgint)
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7530_rmw(priv, MT7530_TRGMII_RD(i),
- RD_TAP_MASK, RD_TAP(16));
return 0;
}
mt7530_pll_setup(priv);
- /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
+ /* Enable port 6 */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
val |= MHWTRAP_MANUAL;
return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
- return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return ETH_DATA_LEN;
}
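The default branch previously under-reported the MTU; the worked arithmetic below shows why ETH_DATA_LEN is the right constant (header sizes as defined in if_ether.h and the DSA tag headers):

    /* Old derivation:
     *   1522 - VLAN_ETH_HLEN (18) - EDSA_HLEN (8) - ETH_FCS_LEN (4) = 1492
     * But a 1522-byte frame is exactly a 1500-byte payload plus the
     * Ethernet header (14), a VLAN tag (4) and the FCS (4), so the
     * correct payload limit is ETH_DATA_LEN (1500). The EDSA tag is
     * accounted for separately on DSA/CPU ports, where the change-MTU
     * path adds EDSA_HLEN back (see the hunk below).
     */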
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ /* For families where we don't know how to alter the MTU,
+ * just accept any value up to ETH_DATA_LEN
+ */
+ if (!chip->info->ops->port_set_jumbo_size &&
+ !chip->info->ops->set_max_frame_size) {
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+
+ return 0;
+ }
+
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
new_mtu += EDSA_HLEN;
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
else if (chip->info->ops->set_max_frame_size)
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
- else
- if (new_mtu > 1522)
- ret = -EINVAL;
mv88e6xxx_reg_unlock(chip);
return ret;
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES ||
- (ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, count)))
+ if (count < ENA_MIN_NUM_IO_QUEUES)
return -EINVAL;
+ if (!ena_xdp_legal_queue_count(adapter, count)) {
+ if (ena_xdp_present(adapter))
+ return -EINVAL;
+
+ xdp_clear_features_flag(netdev);
+ } else {
+ xdp_set_features_flag(netdev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT);
+ }
+
return ena_update_queue_count(adapter, count);
}
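Several drivers in this series (ena here; nicvf, mvpp2 and mlx5 below) start toggling their advertised XDP features at run time: when the active configuration can no longer satisfy XDP's constraints the flags are cleared, and they are re-set once the configuration becomes XDP-capable again. The recurring shape, using the net/xdp helpers (config_supports_xdp() is a hypothetical stand-in for each driver's own check):

    /* Sketch: keep netdev->xdp_features in sync with the live config */
    if (config_supports_xdp(adapter))
        xdp_set_features_flag(netdev,
                              NETDEV_XDP_ACT_BASIC |
                              NETDEV_XDP_ACT_REDIRECT);
    else
        xdp_clear_features_flag(netdev);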
/* Set offload features */
ena_set_dev_offloads(feat, netdev);
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
-
adapter->max_mtu = feat->dev_attr.max_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->min_mtu = ENA_MIN_MTU;
ena_config_debug_area(adapter);
+ if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
+
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
return num_frames - drop;
}
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+ struct net_device *dev,
+ struct aq_ring_buff_s *buff)
+{
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return NULL;
+
+ skb = xdp_build_skb_from_frame(xdpf, dev);
+ if (!skb)
+ return NULL;
+
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+}
+
static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
struct xdp_buff *xdp,
struct aq_ring_s *rx_ring,
prog = READ_ONCE(rx_ring->xdp_prog);
if (!prog)
- goto pass;
+ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
prefetchw(xdp->data_hard_start); /* xdp_frame write */
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
-pass:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
- goto out_aborted;
- skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
if (!skb)
goto out_aborted;
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_pass;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
- aq_get_rxpages_xdp(buff, xdp);
return skb;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
- if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
+ if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
- bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
- }
+
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
+#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
ptp_info);
u64 ns = timespec64_to_ns(ts);
- if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
spin_lock_bh(&ptp->ptp_lock);
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
spin_lock_bh(&ptp->ptp_lock);
return 0;
}
+static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
+{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ netdev_err(bp->dev,
+ "ptp adjfine failed. rc = %d\n", rc);
+ return rc;
+}
+
static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- struct hwrm_port_mac_cfg_input *req;
struct bnxt *bp = ptp->bp;
- int rc = 0;
- if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) {
- spin_lock_bh(&ptp->ptp_lock);
- timecounter_read(&ptp->tc);
- ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_bh(&ptp->ptp_lock);
- } else {
- s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
-
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ if (BNXT_PTP_USE_RTC(bp))
+ return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
- rc = hwrm_req_send(ptp->bp, req);
- if (rc)
- netdev_err(ptp->bp->dev,
- "ptp adjfine failed. rc = %d\n", rc);
- }
- return rc;
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
}
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
u64 ns;
int rc;
- if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
return -ENODEV;
if (!phc_cfg) {
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
rc = bnxt_ptp_init_rtc(bp, phc_cfg);
if (rc)
goto out;
} else {
bnxt_ptp_timecounter_init(bp, true);
+ bnxt_ptp_adjfine_rtc(bp, 0);
}
ptp->ptp_info = bnxt_ptp_caps;
bp->jumbo_max_len = macb_config->jumbo_max_len;
bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
+ if (of_property_read_bool(np, "magic-packet"))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
if (channel->tx_count > nic->max_queues)
return -EINVAL;
- if (nic->xdp_prog &&
- ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
- netdev_err(nic->netdev,
- "XDP mode, RXQs + TXQs > Max %d\n",
- nic->max_queues);
- return -EINVAL;
+ if (channel->tx_count + channel->rx_count > nic->max_queues) {
+ if (nic->xdp_prog) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
+ xdp_clear_features_flag(nic->netdev);
+ } else if (!pass1_silicon(nic->pdev)) {
+ xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC);
}
if (if_up)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (!pass1_silicon(nic->pdev) &&
+ nic->rx_queues + nic->tx_queues <= nic->max_queues)
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (of_find_property(np, "davicom,ext-phy", NULL))
+ if (of_property_read_bool(np, "davicom,ext-phy"))
pdata->flags |= DM9000_PLATF_EXT_PHY;
- if (of_find_property(np, "davicom,no-eeprom", NULL))
+ if (of_property_read_bool(np, "davicom,no-eeprom"))
pdata->flags |= DM9000_PLATF_NO_EEPROM;
ret = of_get_mac_address(np, pdata->dev_addr);
};
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
- struct ethtool_rmon_stats *s,
- const struct ethtool_rmon_hist_range **ranges)
+ struct ethtool_rmon_stats *s)
{
s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
-
- *ranges = enetc_rmon_ranges;
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
struct enetc_hw *hw = &priv->si->hw;
struct enetc_si *si = priv->si;
+ *ranges = enetc_rmon_ranges;
+
switch (rmon_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 0, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 1, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
ethtool_aggregate_rmon_stats(ndev, rmon_stats);
if (ret)
goto failed_ipc_init;
- if (of_get_property(np, "fsl,magic-packet", NULL))
+ if (of_property_read_bool(np, "fsl,magic-packet"))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
ret = fec_enet_init_stop_mode(fep, np);
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ if (of_property_read_bool(np, "fsl,7-wire-mode")) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
else
priv->interface = gfar_get_interface(dev);
- if (of_find_property(np, "fsl,magic-packet", NULL))
+ if (of_property_read_bool(np, "fsl,magic-packet"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
- if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ if (of_property_read_bool(np, "fsl,wake-on-filer"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
struct ethtool_link_ksettings *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- int err = gve_adminq_report_link_speed(priv);
+ int err = 0;
+
+ if (priv->link_speed == 0)
+ err = gve_adminq_report_link_speed(priv);
cmd->base.speed = priv->link_speed;
return err;
void __iomem *mpu_addr;
void __iomem *ca_addr;
u8 __iomem *eth_addr;
+ u8 mac[ETH_ALEN];
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
goto probe_failed;
/* someone seems to like messed up stuff */
- netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
- netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
- netdevice->dev_addr[2] = readb(eth_addr + 0x09);
- netdevice->dev_addr[3] = readb(eth_addr + 0x08);
- netdevice->dev_addr[4] = readb(eth_addr + 0x07);
- netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+ mac[0] = readb(eth_addr + 0x0b);
+ mac[1] = readb(eth_addr + 0x0a);
+ mac[2] = readb(eth_addr + 0x09);
+ mac[3] = readb(eth_addr + 0x08);
+ mac[4] = readb(eth_addr + 0x07);
+ mac[5] = readb(eth_addr + 0x06);
+ eth_hw_addr_set(netdevice, mac);
iounmap(eth_addr);
if (netdevice->irq < 0) {
}
/* Fixup some feature bits based on the device tree */
- if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+ if (of_property_read_bool(np, "has-inverted-stacr-oc"))
dev->features |= EMAC_FTR_STACR_OC_INVERT;
- if (of_get_property(np, "has-new-stacr-staopc", NULL))
+ if (of_property_read_bool(np, "has-new-stacr-staopc"))
dev->features |= EMAC_FTR_HAS_NEW_STACR;
/* CAB lacks the appropriate properties */
* property here for now, but new flat device trees should set a
* status property to "disabled" instead.
*/
- if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+ if (of_property_read_bool(np, "unused") || !of_device_is_available(np))
return -ENODEV;
/* Find ourselves in the bootlist if we are there */
if (of_match_node(emac_match, np) == NULL)
continue;
- if (of_get_property(np, "unused", NULL))
+ if (of_property_read_bool(np, "unused"))
continue;
idx = of_get_property(np, "cell-index", NULL);
if (idx == NULL)
}
/* Check for RGMII flags */
- if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
+ if (of_property_read_bool(ofdev->dev.of_node, "has-mdio"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
int err;
int v_idx;
+ pci_set_drvdata(pf->pdev, pf);
pci_save_state(pf->pdev);
/* set up periodic task facility */
struct i40e_fdir_filter *data)
{
bool is_vlan = !!data->vlan_tag;
- struct vlan_hdr vlan;
- struct ipv6hdr ipv6;
- struct ethhdr eth;
- struct iphdr ip;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
u8 *tmp;
if (ipv4) {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* Do not track VLAN 0 filter, always added by the PF on VF init */
+ if (!vid)
+ return 0;
+
if (!VLAN_FILTERING_ALLOWED(adapter))
return -EIO;
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* We do not track VLAN 0 filter */
+ if (!vid)
+ return 0;
+
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
if (proto == cpu_to_be16(ETH_P_8021Q))
clear_bit(vid, adapter->vsi.active_cvlans);
mutex_unlock(&adapter->crit_lock);
break;
}
+ /* Simply return if we already went through iavf_shutdown */
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
mutex_unlock(&adapter->crit_lock);
usleep_range(500, 1000);
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
- if (!f->vlan.vid)
- continue;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- /* We can directly unplug aux device here only if the flag bit
- * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
- * could race with ice_plug_aux_dev() called from
- * ice_service_task(). In this case we only clear that bit now and
- * aux device will be unplugged later once ice_plug_aux_device()
- * called from ice_service_task() finishes (see ice_service_task()).
+ /* defer unplug to service task to avoid RTNL lock and
+ * clear PLUG bit so that pending plugs don't interfere
*/
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
-
+ clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
+ set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
#endif /* _ICE_H_ */
struct ice_vsi_ctx *ctxt;
int status;
+ ice_fltr_remove_all(vsi);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
- ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
}
}
- if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
- /* Plug aux device per request */
- ice_plug_aux_dev(pf);
+ /* unplug aux dev per request, if an unplug request came in
+ * while processing a plug request, this will handle it
+ */
+ if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
- /* Mark plugging as done but check whether unplug was
- * requested during ice_plug_aux_dev() call
- * (e.g. from ice_clear_rdma_cap()) and if so then
- * plug aux device.
- */
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
- }
+ /* Plug aux device per request */
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
struct iidc_event *event;
return err;
}
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+ ice_fltr_remove_all(vsi);
+ ice_vsi_close(vsi);
+}
+
static int ice_init_eth(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
{
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- ice_vsi_close(ice_get_main_vsi(pf));
+ ice_stop_eth(ice_get_main_vsi(pf));
ice_vsi_decfg(ice_get_main_vsi(pf));
ice_deinit_dev(pf);
}
struct ice_vf *vf;
int ret;
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
if (ice_is_eswitch_mode_switchdev(pf)) {
dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
return -EOPNOTSUPP;
}
- vf = ice_get_vf_by_id(pf, vf_id);
- if (!vf)
- return -EINVAL;
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
if (++ntc == cnt)
ntc = 0;
+ rx_ring->first_desc = ntc;
continue;
}
}
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (err)
if (err)
return err;
}
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
+static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
-static int igb_disable_sriov(struct pci_dev *dev);
-static int igb_pci_disable_sriov(struct pci_dev *dev);
+static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
static int igb_suspend(struct device *);
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
+ igb_disable_sriov(pdev, false);
#endif
pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+ else
+ igb_reset(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
adapter->flags |= IGB_FLAG_DMAC;
}
- return 0;
+ return reinit ? igb_sriov_reinit(pdev) : 0;
}
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
"Unable to allocate memory for VF MAC filter list\n");
}
- /* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs) {
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
- }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
/* DMA Coalescing is not supported in IOV mode. */
adapter->flags &= ~IGB_FLAG_DMAC;
+
+ if (reinit) {
+ err = igb_sriov_reinit(pdev);
+ if (err)
+ goto err_out;
+ }
+
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs)
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+
goto out;
err_out:
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
- rtnl_lock();
- igb_disable_sriov(pdev);
- rtnl_unlock();
+ igb_disable_sriov(pdev, false);
#endif
unregister_netdev(netdev);
igb_reset_interrupt_capability(adapter);
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs, false);
#endif /* CONFIG_PCI_IOV */
}
}
}
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
-{
- struct net_device *netdev = pci_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- rtnl_lock();
-
- if (netif_running(netdev))
- igb_close(netdev);
- else
- igb_reset(adapter);
-
- igb_clear_interrupt_scheme(adapter);
-
- igb_init_queue_configuration(adapter);
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- rtnl_unlock();
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- if (netif_running(netdev))
- igb_open(netdev);
-
- rtnl_unlock();
-
- return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
- int err = igb_disable_sriov(dev);
-
- if (!err)
- err = igb_sriov_reinit(dev);
-
- return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
- int err = igb_enable_sriov(dev, num_vfs);
-
- if (err)
- goto out;
-
- err = igb_sriov_reinit(dev);
- if (!err)
- return num_vfs;
-
-out:
- return err;
-}
-
-#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
- if (num_vfs == 0)
- return igb_pci_disable_sriov(dev);
- else
- return igb_pci_enable_sriov(dev, num_vfs);
+ int err;
+
+ if (num_vfs == 0) {
+ return igb_disable_sriov(dev, true);
+ } else {
+ err = igb_enable_sriov(dev, num_vfs, true);
+ return err ? err : num_vfs;
+ }
#endif
return 0;
}
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
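The igbvf fix gives each request_irq() failure a label that releases the vectors acquired before it; --vector walks back down the msix_entries array, so the rx failure unwinds tx, and the "other" failure unwinds rx then tx. The canonical unwind pattern, with hypothetical names:

    static int foo_request_msix(struct foo_adapter *ad)
    {
        int err;

        err = request_irq(ad->tx_irq, foo_tx_isr, 0, "foo-tx", ad);
        if (err)
            goto out;

        err = request_irq(ad->rx_irq, foo_rx_isr, 0, "foo-rx", ad);
        if (err)
            goto free_tx;

        err = request_irq(ad->other_irq, foo_other_isr, 0, "foo-other", ad);
        if (err)
            goto free_rx;

        return 0;

    free_rx:
        free_irq(ad->rx_irq, ad);
    free_tx:
        free_irq(ad->tx_irq, ad);
    out:
        return err;
    }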
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/etherdevice.h>
+
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- if (e->gate_mask & BIT(i))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (e->gate_mask & BIT(i)) {
queue_uses[i]++;
- /* There are limitations: A single queue cannot be
- * opened and closed multiple times per cycle unless the
- * gate stays open. Check for it.
- */
- if (queue_uses[i] > 1 &&
- !(prev->gate_mask & BIT(i)))
- return false;
- }
+ /* There are limitations: A single queue cannot
+ * be opened and closed multiple times per cycle
+ * unless the gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
+ return false;
+ }
}
return true;
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
+ if (percpu && port->ntxqs >= num_possible_cpus() * 2)
+ xdp_set_features_flag(port->dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
+ else
+ xdp_clear_features_flag(port->dev);
+
mvpp2_swf_bm_pool_init(port);
if (status[i])
mvpp2_open(port->dev);
if (!port->priv->percpu_pools)
mvpp2_set_hw_csum(port, port->pool_long->id);
+ else if (port->ntxqs >= num_possible_cpus() * 2)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
dev->vlan_features |= features;
netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
- dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT;
-
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
#define SGMII_SEND_AN_ERROR_EN BIT(11)
#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+/* Register to reset SGMII design */
+#define SGMII_RESERVED_0 0x34
+#define SGMII_SW_RESET BIT(0)
+
/* Register to set SGMII speed, ANA RG_ Control Signals III*/
#define SGMSYS_ANA_RG_CS3 0x2028
#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
const unsigned long *advertising,
bool permit_pause_to_mac)
{
+ bool mode_changed = false, changed, use_an;
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
unsigned int rgc3, sgm_mode, bmcr;
int advertise, link_timer;
- bool changed, use_an;
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
return advertise;
- link_timer = phylink_get_link_timer_ns(interface);
- if (link_timer < 0)
- return link_timer;
-
/* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
* we assume that fixes its speed at bitrate = line rate (in
* other words, 1000Mbps or 2500Mbps).
}
if (use_an) {
- /* FIXME: Do we need to set AN_RESTART here? */
- bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
+ bmcr = SGMII_AN_ENABLE;
} else {
bmcr = 0;
}
if (mpcs->interface != interface) {
+ link_timer = phylink_get_link_timer_ns(interface);
+ if (link_timer < 0)
+ return link_timer;
+
/* PHYA power down */
regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+ /* Reset SGMII PCS state */
+ regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0,
+ SGMII_SW_RESET, SGMII_SW_RESET);
+
if (interface == PHY_INTERFACE_MODE_2500BASEX)
rgc3 = RG_PHY_SPEED_3_125G;
else
regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
RG_PHY_SPEED_3_125G, rgc3);
+ /* Setup the link timer */
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
+
mpcs->interface = interface;
+ mode_changed = true;
}
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
SGMII_ADVERTISE, advertise, &changed);
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
-
/* Update the sgmsys mode register */
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
/* Update the BMCR */
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
+ SGMII_AN_ENABLE, bmcr);
/* Release PHYA power down state
* Only removing bit SGMII_PHYA_PWD isn't enough.
usleep_range(50, 100);
regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
- return changed;
+ return changed || mode_changed;
}
static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
mlx4_en_get_cqe_ts(_ctx->cqe));
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
return 0;
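The mlx4 hint kfuncs above (and the mlx5 ones further below) switch from -EOPNOTSUPP to -ENODATA for failures that depend on the current configuration or packet: -EOPNOTSUPP is reserved for "this driver can never supply the hint", while -ENODATA means "not available right now", which a BPF program can treat as a soft miss. A sketch of the convention for a hypothetical driver:

    /* Sketch: XDP RX-hash metadata kfunc error convention */
    static int foo_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
    {
        const struct foo_xdp_buff *_ctx = (void *)ctx;

        if (!(_ctx->dev->features & NETIF_F_RXHASH))
            return -ENODATA;        /* disabled in this config */

        *hash = be32_to_cpu(_ctx->cqe_hash);
        return 0;
    }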
} channel;
} mqprio;
bool rx_cqe_compress_def;
- bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
struct mlx5e_packet_merge_param packet_merge;
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
+void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
meter = mlx5e_tc_meter_get(priv->mdev, ¶ms);
if (IS_ERR(meter)) {
NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
- mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
return PTR_ERR(meter);
}
{
struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
struct rhashtable *ht = &handle->ht;
+ u64 lastused;
int err = 0;
act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
act_stats->tc_act_cookie = act_cookie;
act_stats->counter = counter;
+ mlx5_fc_query_cached_raw(counter,
+ &act_stats->lastbytes,
+ &act_stats->lastpackets, &lastused);
+
rcu_read_lock();
old_act_stats = rhashtable_lookup_get_insert_fast(ht,
&act_stats->hash,
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
return 0;
if (unlikely(!priv_rx))
return -ENOMEM;
- dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
- if (IS_ERR(dek)) {
- err = PTR_ERR(dek);
- goto err_create_key;
- }
- priv_rx->dek = dek;
-
- INIT_LIST_HEAD(&priv_rx->list);
- spin_lock_init(&priv_rx->lock);
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
priv_rx->crypto_info.crypto_info_128 =
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_cipher_type;
}
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
+ goto err_cipher_type;
+ }
+ priv_rx->dek = dek;
+
+ INIT_LIST_HEAD(&priv_rx->list);
+ spin_lock_init(&priv_rx->lock);
+
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
priv_rx->sk = sk;
mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
-err_create_key:
+err_cipher_type:
kfree(priv_rx);
return err;
}
if (IS_ERR(priv_tx))
return PTR_ERR(priv_tx);
- dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
- if (IS_ERR(dek)) {
- err = PTR_ERR(dek);
- goto err_create_key;
- }
- priv_tx->dek = dek;
-
- priv_tx->expected_seq = start_offload_tcp_sn;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
priv_tx->crypto_info.crypto_info_128 =
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_pool_push;
}
+
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
+ goto err_pool_push;
+ }
+
+ priv_tx->dek = dek;
+ priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
return 0;
-err_create_key:
+err_pool_push:
pool_push(pool, priv_tx);
return err;
}
};
struct mlx5e_macsec_umr {
+ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
dma_addr_t dma_addr;
- u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
u32 mkey;
};
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
+ unsigned long expires;
int err;
aso = &macsec->aso;
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(maso, false);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
+
if (err)
goto err_out;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
+ }
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
+ int err;
if (enable) {
/* Checking the regular RQ here; mlx5e_validate_xsk_param called
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
mlx5e_set_rq_type(mdev, &new_params);
- return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ if (err)
+ return err;
+
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
+ return 0;
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
+void mlx5e_set_xdp_feature(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params *params = &priv->channels.params;
+ xdp_features_t val;
+
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ xdp_clear_features_flag(netdev);
+ return;
+ }
+
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+ val |= NETDEV_XDP_ACT_RX_SG;
+ xdp_set_features_flag(netdev, val);
+}
+
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t oper_features = features;
return -EINVAL;
}
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
return 0;
}
}
}
- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
+ features |= NETIF_F_NETNS_LOCAL;
+ } else {
+ features &= ~NETIF_F_NETNS_LOCAL;
+ }
mutex_unlock(&priv->state_lock);
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
+ int max_xdp_mtu;
if (!xsk_pool)
continue;
mlx5e_build_xsk_param(xsk_pool, &xsk);
+ max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
- if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
+ /* Validate XSK params and XDP MTU in advance */
+ if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+ new_params->sw_mtu > max_xdp_mtu) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
int max_mtu_frame, max_mtu_page, max_mtu;
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
- max_mtu = min(max_mtu_frame, max_mtu_page);
+ max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
- netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
+ netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}
if (old_prog)
bpf_prog_put(old_prog);
- if (reset) {
- if (prog)
- xdp_features_set_redirect_target(netdev, true);
- else
- xdp_features_clear_redirect_target(netdev);
- }
-
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
/* AF_XDP */
params->xsk = xsk;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_RX_SG;
-
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ mlx5e_set_xdp_feature(netdev);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_health_create_reporters(priv);
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
return 0;
}
}
features = MLX5E_RX_RES_FEATURE_PTP;
- if (priv->channels.params.tunneled_offload_en)
+ if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
priv->max_nch, priv->drop_rq.rqn,
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->mqprio.num_tc = 1;
- params->tunneled_offload_en = false;
if (rep->vport != MLX5_VPORT_UPLINK)
params->vlan_strip_disable = true;
mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
struct mlx5_core_dev *mdev)
{
+ u32 link_speed = 0;
u64 link_speed64;
- u32 link_speed;
hairpin_params->mdev = mdev;
/* set hairpin pair per each 50Gbs share of the link */
parse_attr->filter_dev = attr->parse_attr->filter_dev;
attr2->action = 0;
attr2->counter = NULL;
- attr->tc_act_cookies_count = 0;
+ attr2->tc_act_cookies_count = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
esw_attr->dest_int_port = dest_int_port;
esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+ esw_attr->split_count = out_index;
/* Forward to root fdb for matching against the new source vport */
attr->dest_chain = 0;
mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
tc->action_stats_handle = mlx5e_tc_act_stats_create();
- if (IS_ERR(tc->action_stats_handle))
+ if (IS_ERR(tc->action_stats_handle)) {
+ err = PTR_ERR(tc->action_stats_handle);
goto err_act_stats;
+ }
return 0;
}
uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
- if (IS_ERR(uplink_priv->action_stats_handle))
+ if (IS_ERR(uplink_priv->action_stats_handle)) {
+ err = PTR_ERR(uplink_priv->action_stats_handle);
goto err_action_counter;
+ }
return 0;
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
+
+ mlx5e_tc_clean_fdb_peer_flows(esw);
+
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
if (WARN_ON_ONCE(IS_ERR(vport))) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
- err = PTR_ERR(vport);
- goto out;
+ return PTR_ERR(vport);
}
esw_acl_ingress_ofld_rules_destroy(esw, vport);
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+ esw_apply_vport_rx_mode(esw, vport, false, false);
esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
- if (esw_is_indir_table(esw, attr))
- err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
- else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
- err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
- &i);
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ /* Source port rewrite (forward to ovs internal port or stack device) isn't
+ * supported in rules with a split action.
+ */
+ err = -EOPNOTSUPP;
else
esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
return 0;
}
+static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+{
+ struct net *devl_net, *netdev_net;
+ struct mlx5_eswitch *esw;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+ devl_net = devlink_net(devlink);
+
+ return net_eq(devl_net, netdev_net);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
- params->tunneled_offload_en = false;
/* CQE compression is not supported for IPoIB */
params->rx_cqe_compress_def = false;
{
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
- mlx5_sriov_detach(dev);
mlx5_eswitch_disable(dev->priv.eswitch);
+ mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
* fw_reset before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
- set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
}
+static u32 mlx5_get_ec_function(u32 function)
+{
+ return function >> 16;
+}
+
+static u32 mlx5_get_func_id(u32 function)
+{
+ return function & 0xffff;
+}
+
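The two helpers decode a combined page-root key; a round-trip sketch, assuming callers pack the EC-function bit above a 16-bit function id (the packing site is not shown in this hunk):

	bool ec_function = true;	/* illustrative */
	u16 func_id = 0x0005;		/* illustrative */
	u32 function = (u32)ec_function << 16 | func_id;	/* assumed key layout */

	WARN_ON(mlx5_get_func_id(function) != func_id);
	WARN_ON(mlx5_get_ec_function(function) != ec_function);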
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
}
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
- struct rb_root *root, u16 func_id)
+ struct rb_root *root, u32 function)
{
u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
+ u32 ec_function = mlx5_get_ec_function(function);
+ u32 function_id = mlx5_get_func_id(function);
int nclaimed;
int err;
- err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
- &nclaimed, false, mlx5_core_is_ecpf(dev));
+ err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
+ &nclaimed, false, ec_function);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
- err, func_id);
+ mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
+ err, function_id, ec_function);
return err;
}
struct thermal_zone_device *tzdev;
int polling_delay;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
- u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
return idx;
/* Normalize the state to the valid speed range. */
- state = thermal->cooling_levels[state];
+ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
}
}
- /* Initialize cooling levels per PWM state. */
- for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
-
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
MLXSW_THERMAL_POLL_INT;
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
+ refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
mutex_init(&mlxsw_sp->parsing.lock);
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
mutex_destroy(&mlxsw_sp->parsing.lock);
+ WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
struct mlxsw_sp_ipv6_addr_node {
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* In case there are no {Port, VID} => FID mappings on the port,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
old_inc_parsing_depth);
return err;
}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
+
+ mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
+ false);
+}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
return 0;
}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+}
#endif
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
err_dscp_init:
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
struct ocelot_stats_region {
struct list_head node;
u32 base;
+ enum ocelot_stat first_stat;
int count;
u32 *buf;
};
OCELOT_STAT(RX_ASSEMBLY_OK),
OCELOT_STAT(RX_MERGE_FRAGMENTS),
OCELOT_STAT(TX_MERGE_FRAGMENTS),
+ OCELOT_STAT(TX_MM_HOLD),
OCELOT_STAT(RX_PMAC_OCTETS),
OCELOT_STAT(RX_PMAC_UNICAST),
OCELOT_STAT(RX_PMAC_MULTICAST),
*/
static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int j;
list_for_each_entry(region, &ocelot->stats_regions, node) {
+ unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat;
+
for (j = 0; j < region->count; j++) {
u64 *stat = &ocelot->stats[idx + j];
u64 val = region->buf[j];
*stat = (*stat & ~(u64)U32_MAX) + val;
}
-
- idx += region->count;
}
}
if (!layout[i].reg)
continue;
- if (region && layout[i].reg == last + 4) {
+ if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
+ ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
WARN_ON(last >= layout[i].reg);
region->base = layout[i].reg;
+ region->first_stat = i;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
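Recording first_stat matters because the stats layout can contain holes (entries whose reg is zero are skipped). A worked example with hypothetical layout indices:

/* Suppose layout[0] and layout[1] are backed by consecutive registers,
 * layout[2] has no register (skipped by the "if (!layout[i].reg)" test),
 * and layout[3] starts a new region:
 *
 *   region A: base = reg(0), first_stat = 0, count = 2
 *   region B: base = reg(3), first_stat = 3, count = 1
 *
 * Transfer now writes region B into stats[port * OCELOT_NUM_STATS + 3].
 * The old "idx += region->count" accumulation would have written it to
 * index 2, shifting every stat that follows a hole in the layout.
 */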
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
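Both sonic fixes apply the same rule: a streaming-DMA handle must be checked with dma_mapping_error(), since 0 can be a valid bus address and a failed mapping need not be 0. A minimal sketch of the TX-side pattern, reusing names from the context above:

	dma_addr_t laddr;

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		dev_kfree_skb_any(skb);	/* drop; never compare laddr to 0 */
		return NETDEV_TX_OK;
	}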
num_vports = p_hwfn->qm_info.num_vports;
+ if (num_vports < 2) {
+ DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+ return -EINVAL;
+ }
+
/* Accounting for the vports which are configured for WFQ explicitly */
for (i = 0; i < num_vports; i++) {
u32 tmp_speed;
if (p_time->hour > 23)
p_time->hour = 0;
if (p_time->min > 59)
- p_time->hour = 0;
+ p_time->min = 0;
if (p_time->msec > 999)
p_time->msec = 0;
if (p_time->usec > 999)
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;
memset(&params, 0, sizeof(params));
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);
+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
+
emac_clks_teardown(adpt);
put_device(&adpt->phydev->mdio.dev);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
- /* Indicate that the MAC is responsible for managing PHY PM */
- phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *pn;
int error;
/* Bitbang init */
if (error)
goto out_free_bus;
+ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ phydev = of_phy_find_device(pn);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+ of_node_put(pn);
+
return 0;
out_free_bus:
u16 pkt_len;
u32 get_ts;
+ if (*quota <= 0)
+ return true;
+
boguscnt = min_t(int, gq->ring_size, *quota);
limit = boguscnt;
desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
- if (--boguscnt < 0)
- break;
dma_rmb();
pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
skb = gq->skbs[gq->cur];
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->rx_ring[gq->cur];
+
+ if (--boguscnt <= 0)
+ break;
}
num = rswitch_get_num_cur_queues(gq);
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
- *quota -= limit - (++boguscnt);
+ *quota -= limit - boguscnt;
return boguscnt <= 0;
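The quota arithmetic is easiest to verify with concrete numbers; boguscnt starts at limit = min(ring_size, *quota) and is decremented once per completed frame, so limit - boguscnt is the number of frames processed:

/* Worked example (illustrative values): ring_size = 128, *quota = 64,
 * so limit = boguscnt = 64. After processing 10 frames, boguscnt = 54:
 *
 *   *quota -= limit - boguscnt;	->  *quota -= 10  (correct)
 *
 * The old "*quota -= limit - (++boguscnt)" charged only 9 on this path,
 * undercounting by one whenever the ring drained before the budget did.
 * The new "*quota <= 0" guard at entry also keeps the bottom-of-loop
 * test from letting a zero budget process one extra frame.
 */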
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
return 0;
};
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
netif_tx_stop_all_queues(ndev);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
if (ts_info->port != rdev->port)
struct rcar_gen4_ptp_private *ptp_priv;
struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
+ DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS);
struct rswitch_gwca gwca;
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
- /* Indicate that the MAC is responsible for managing PHY PM */
- phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
struct bb_info *bitbang;
struct platform_device *pdev = mdp->pdev;
struct device *dev = &mdp->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *pn;
/* create bit control struct for PHY */
bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
if (ret)
goto out_free_bus;
+ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ phydev = of_phy_find_device(pn);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+ of_node_put(pn);
+
return 0;
out_free_bus:
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int host_dma_width;
unsigned int rssen;
unsigned int vlhash;
unsigned int sphen;
struct device_node *np = dev->of_node;
int err = 0;
- if (of_get_property(np, "snps,rmii_refclk_ext", NULL))
- dwmac->rmii_refclk_ext = true;
+ dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext");
dwmac->clk_tx = devm_clk_get(dev, "tx");
if (IS_ERR(dwmac->clk_tx)) {
goto err_parse_dt;
}
- plat_dat->addr64 = dwmac->ops->addr_width;
+ plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
intel_priv->is_pse = true;
plat->bus_id = 2;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
intel_priv->is_pse = true;
plat->bus_id = 3;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
- plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
if (!buf->page) {
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
while (dirty-- > 0) {
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
- priv->dma_cap.addr64);
+ priv->dma_cap.host_dma_width);
seq_printf(seq, "\tReceive Side Scaling: %s\n",
priv->dma_cap.rssen ? "Y" : "N");
seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
dev_info(priv->device, "SPH feature enabled\n");
}
- /* The current IP register MAC_HW_Feature1[ADDR64] only define
- * 32/40/64 bit width, but some SOC support others like i.MX8MP
- * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
- * So overwrite dma_cap.addr64 according to HW real design.
+ /* Ideally our host DMA address width is the same as for the
+ * device. However, it may differ and then we have to use our
+ * host DMA width for allocation and the device DMA width for
+ * register handling.
*/
- if (priv->plat->addr64)
- priv->dma_cap.addr64 = priv->plat->addr64;
+ if (priv->plat->host_dma_width)
+ priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
+ else
+ priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
- if (priv->dma_cap.addr64) {
+ if (priv->dma_cap.host_dma_width) {
ret = dma_set_mask_and_coherent(device,
- DMA_BIT_MASK(priv->dma_cap.addr64));
+ DMA_BIT_MASK(priv->dma_cap.host_dma_width));
if (!ret) {
- dev_info(priv->device, "Using %d bits DMA width\n",
- priv->dma_cap.addr64);
+ dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
+ priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
/*
* If more than 32 bits can be addressed, make sure to
goto error_hw_init;
}
- priv->dma_cap.addr64 = 32;
+ priv->dma_cap.host_dma_width = 32;
}
}
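For context on the rename: host_dma_width is what feeds the DMA mask (how buffers are addressed on the CPU/IOMMU side), while dma_cap.addr64 continues to describe the device's own addressing as reported in MAC_HW_Feature1. A sketch of the mask selection with illustrative widths:

	/* Prefer the platform's host-visible width; fall back to 32 bits.
	 * 40 is illustrative (e.g. a SoC exposing 40-bit host addressing).
	 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;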
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
if (model)
strcpy(np->vpd.model, model);
- if (of_find_property(dp, "hot-swappable-phy", NULL)) {
+ if (of_property_read_bool(dp, "hot-swappable-phy")) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
}
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
vp = vnet_find_parent(hp, vdev->mp, vdev);
if (IS_ERR(vp)) {
pr_err("Cannot find port parent vnet\n");
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, genf[req->index].length);
+ am65_cpts_write32(cpts, 0, genf[req->index].control);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
+
cpts->genf_enable |= BIT(req->index);
} else {
am65_cpts_write32(cpts, 0, genf[req->index].length);
if (IS_ERR(priv->gmii_sel))
return PTR_ERR(priv->gmii_sel);
- if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
- priv->rmii_clock_external = true;
+ priv->rmii_clock_external = of_property_read_bool(pdev->dev.of_node, "rmii-clock-ext");
dev_set_drvdata(&pdev->dev, priv);
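This and the other of_property_read_bool() conversions in this patch (netcp, velocity, temac, fsl_ucc_hdlc) rely on the same contract: the helper returns true exactly when the property is present, which is what the open-coded of_get_property(np, ..., NULL) != NULL tests meant. A one-line sketch with a made-up property name:

	/* "vendor,example-flag" is hypothetical; presence alone means true. */
	priv->some_flag = of_property_read_bool(np, "vendor,example-flag");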
/* init the hw stats lock */
spin_lock_init(&gbe_dev->hw_stats_lock);
- if (of_find_property(node, "enable-ale", NULL)) {
- gbe_dev->enable_ale = true;
+ gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
+ if (gbe_dev->enable_ale)
dev_info(dev, "ALE enabled\n");
- } else {
- gbe_dev->enable_ale = false;
+ else
dev_dbg(dev, "ALE bypass enabled\n");
- }
ret = of_property_read_u32(node, "tx-queue",
&gbe_dev->tx_queue_id);
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
+ dma_addr_t cpu_addr;
+
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- if (!descr->bus_addr)
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;
+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);
skb_put(skb, be32_to_cpu(descr->valid_size)?
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
-#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME 2312
+#define GELIC_NET_MAX_MTU 2294
+#define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
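The new constants are consistent with the rx_skb_size computation in gelic_descr_prepare_rx() above; the arithmetic, written out:

/* Worked sizing from the definitions above:
 *
 *   GELIC_NET_MAX_FRAME                    = 2312
 *   ALIGN(2312, GELIC_NET_RXBUF_ALIGN=128) = 2432
 *   rx_skb_size = 2432 + 128 - 1           = 2559
 *
 * After skb_reserve() rounds skb->data up to a 128-byte boundary, at
 * least 2432 bytes (a 128-multiple covering the maximum frame) remain,
 * and GELIC_NET_MAX_MTU = 2294 = 2312 - ETH_HLEN (14) - VLAN_HLEN (4).
 */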
struct resource res;
int ret;
- if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
- vptr->no_eeprom = 1;
+ vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom");
ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
if (ret) {
struct device *dev;
struct pci_dev *pdev;
struct net_device *netdev;
- int no_eeprom;
+ bool no_eeprom;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 ip_addr[4];
* endianness mode. Default for OF devices is big-endian.
*/
little_endian = false;
- if (temac_np) {
- if (of_get_property(temac_np, "little-endian", NULL))
- little_endian = true;
- } else if (pdata) {
+ if (temac_np)
+ little_endian = of_property_read_bool(temac_np, "little-endian");
+ else if (pdata)
little_endian = pdata->reg_little_endian;
- }
+
if (little_endian) {
lp->temac_ior = _temac_ior_le;
lp->temac_iow = _temac_iow_le;
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);
dev_dbg(&link->dev, "detach\n");
switch (reg_id) {
case INTER_EE_SRC_CH_IRQ_MSK:
case INTER_EE_SRC_EV_CH_IRQ_MSK:
+ return gsi->version >= IPA_VERSION_3_5;
+
+ case HW_PARAM_2:
+ return gsi->version >= IPA_VERSION_3_5_1;
+
+ case HW_PARAM_4:
+ return gsi->version >= IPA_VERSION_5_0;
+
case CH_C_CNTXT_0:
case CH_C_CNTXT_1:
case CH_C_CNTXT_2:
case CH_CMD:
case EV_CH_CMD:
case GENERIC_CMD:
- case HW_PARAM_2:
case CNTXT_TYPE_IRQ:
case CNTXT_TYPE_IRQ_MSK:
case CNTXT_SRC_CH_IRQ:
#include <linux/bits.h>
+struct platform_device;
+
+struct gsi;
+
/**
* DOC: GSI Registers
*
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/io.h>
enum ipa_version version = ipa->version;
switch (reg_id) {
+ case FILT_ROUT_HASH_EN:
+ return version == IPA_VERSION_4_2;
+
+ case FILT_ROUT_HASH_FLUSH:
+ return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2;
+
+ case FILT_ROUT_CACHE_FLUSH:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ return version >= IPA_VERSION_5_0;
+
case IPA_BCR:
case COUNTER_CFG:
return version < IPA_VERSION_4_5;
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_4_5;
+ version == IPA_VERSION_4_5 ||
+ version == IPA_VERSION_5_0;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
- return version <= IPA_VERSION_3_1;
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_5_0;
case ENDP_FILTER_ROUTER_HSH_CFG:
- return version != IPA_VERSION_4_2;
+ return version < IPA_VERSION_5_0 &&
+ version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
case SHARED_MEM_SIZE:
case QSB_MAX_WRITES:
case QSB_MAX_READS:
- case FILT_ROUT_HASH_EN:
- case FILT_ROUT_CACHE_CFG:
- case FILT_ROUT_HASH_FLUSH:
- case FILT_ROUT_CACHE_FLUSH:
case STATE_AGGR_ACTIVE:
case LOCAL_PKT_PROC_CNTXT:
case AGGR_FORCE_CLOSE:
case ENDP_INIT_RSRC_GRP:
case ENDP_INIT_SEQ:
case ENDP_STATUS:
- case ENDP_FILTER_CACHE_CFG:
- case ENDP_ROUTER_CACHE_CFG:
case IPA_IRQ_STTS:
case IPA_IRQ_EN:
case IPA_IRQ_CLR:
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
- FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
- FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
- FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
+ FILT_ROUT_HASH_EN, /* IPA v4.2 */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v4.2 nor IPA v5.0+ */
FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
SRC_RSRC_GRP_01_RSRC_TYPE,
SRC_RSRC_GRP_23_RSRC_TYPE,
- SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
- SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
DST_RSRC_GRP_01_RSRC_TYPE,
DST_RSRC_GRP_23_RSRC_TYPE,
- DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
- DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
ENDP_INIT_CFG,
ENDP_INIT_NAT, /* TX only */
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
-/* FILT_ROUT_CACHE_CFG register */
-enum ipa_reg_filt_rout_cache_cfg_field_id {
- ROUTER_CACHE_EN,
- FILTER_CACHE_EN,
- LOW_PRI_HASH_HIT_DISABLE,
- LRU_EVICTION_THRESHOLD,
-};
-
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
#define _REG_H_
#include <linux/types.h>
-#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/bug.h>
/**
* struct reg - A register descriptor
0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
- 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
- 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
-REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
[CH_OPCODE] = GENMASK(31, 24),
};
-REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
[EV_OPCODE] = GENMASK(31, 24),
};
-REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
/* Bits 14-31 reserved */
};
-REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[GSI_USE_INTER_EE] = BIT(31),
};
-REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
- 0x0001f098 + 0x4000 * GSI_EE_AP);
+ 0x00012098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
- 0x0001f09c + 0x4000 * GSI_EE_AP);
+ 0x0001209c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
- 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
- 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
- 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
- 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
-REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
-REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
-REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 8-31 reserved */
};
-REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
- 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(19, 0),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
- 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
-REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
-REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 25-31 reserved */
};
-REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
- 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
- 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
- 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
- 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
- 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
- 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
- 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
- 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
- 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
- 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
- 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
- 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
- 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
- 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
- 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
- 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
- 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
goto out;
skb->dev = addr->master->dev;
+ skb->skb_iif = skb->dev->ifindex;
len = skb->len + ETH_HLEN;
ipvlan_count_rx(addr->master, len, true, false);
out:
MODULE_LICENSE("GPL");
/**
- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
* @mdio: pointer to mii_bus structure
* @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
* an ACPI device object corresponding to the MDIO bus and its children are
* expected to correspond to the PHY devices on that bus.
+ * @owner: module owning this @mdio object.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @fwnode.
*/
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner)
{
struct fwnode_handle *child;
u32 addr;
/* Mask out all PHYs from auto probing. */
mdio->phy_mask = GENMASK(31, 0);
- ret = mdiobus_register(mdio);
+ ret = __mdiobus_register(mdio, owner);
if (ret)
return ret;
}
return 0;
}
-EXPORT_SYMBOL(acpi_mdiobus_register);
+EXPORT_SYMBOL(__acpi_mdiobus_register);
if (i >= ARRAY_SIZE(nexus->buses))
break;
}
+ fwnode_handle_put(fwn);
return 0;
err_release_regions:
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
/**
- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
* @np: pointer to device_node of MDIO bus.
+ * @owner: module owning the @mdio object.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @np.
*/
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner)
{
struct device_node *child;
bool scanphys = false;
int addr, rc;
if (!np)
- return mdiobus_register(mdio);
+ return __mdiobus_register(mdio, owner);
/* Do not continue if the node is disabled */
if (!of_device_is_available(np))
of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
/* Register the MDIO bus */
- rc = mdiobus_register(mdio);
+ rc = __mdiobus_register(mdio, owner);
if (rc)
return rc;
mdiobus_unregister(mdio);
return rc;
}
-EXPORT_SYMBOL(of_mdiobus_register);
+EXPORT_SYMBOL(__of_mdiobus_register);
/**
* of_mdio_find_device - Given a device tree node, find the mdio_device
#if IS_ENABLED(CONFIG_OF_MDIO)
/**
- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
* @dev: Device to register mii_bus for
* @mdio: MII bus structure to register
* @np: Device node to parse
+ * @owner: Owning module
*/
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np)
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner)
{
struct mdiobus_devres *dr;
int ret;
if (!dr)
return -ENOMEM;
- ret = of_mdiobus_register(mdio, np);
+ ret = __of_mdiobus_register(mdio, np, owner);
if (ret) {
devres_free(dr);
return ret;
devres_add(dev, dr);
return 0;
}
-EXPORT_SYMBOL(devm_of_mdiobus_register);
+EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
MODULE_LICENSE("GPL");
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc < 0) {
- rc = phy_restore_page(phydev, rc, rc);
- goto out_unlock;
- }
+ if (rc < 0)
+ return phy_restore_page(phydev, rc, rc);
if (wol->wolopts & WAKE_MAGIC) {
/* Store the device address for the magic packet */
rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
if (rc < 0)
- goto out_unlock;
+ return rc;
if (wol->wolopts & WAKE_MAGIC) {
/* Enable the WOL interrupt */
reg_val |= MII_VSC85XX_INT_MASK_WOL;
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
- goto out_unlock;
+ return rc;
} else {
/* Disable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
- goto out_unlock;
+ return rc;
}
/* Clear WOL interrupt status */
reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-out_unlock:
- mutex_unlock(&phydev->lock);
-
- return rc;
+ return 0;
}
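Dropping phydev->lock here assumes the phy_ethtool_{get,set}_wol() entry points now take it themselves (that companion change is not in this hunk); the paged accesses stay safe because phy_select_page()/phy_restore_page() serialize on the MDIO bus lock. The canonical pairing, mirroring the code above:

	int rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);

	if (rc < 0)
		return phy_restore_page(phydev, rc, rc);

	/* __phy_read()/__phy_write() against the selected page here */

	return phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);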
static void vsc85xx_wol_get(struct phy_device *phydev,
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
- goto out_unlock;
+ goto out_restore_page;
reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (reg_val & SECURE_ON_ENABLE)
}
}
-out_unlock:
+out_restore_page:
phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
- mutex_unlock(&phydev->lock);
}
#if IS_ENABLED(CONFIG_OF_MDIO)
#define SGMII_ABILITY BIT(0)
#define VEND1_MII_BASIC_CONFIG 0xAFC6
-#define MII_BASIC_CONFIG_REV BIT(8)
+#define MII_BASIC_CONFIG_REV BIT(4)
#define MII_BASIC_CONFIG_SGMII 0x9
#define MII_BASIC_CONFIG_RGMII 0x7
#define MII_BASIC_CONFIG_RMII 0x5
return NULL;
}
+static void phy_process_state_change(struct phy_device *phydev,
+ enum phy_state old_state)
+{
+ if (old_state != phydev->state) {
+ phydev_dbg(phydev, "PHY state change %s -> %s\n",
+ phy_state_to_str(old_state),
+ phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
+}
+
static void phy_link_up(struct phy_device *phydev)
{
phydev->phy_link_change(phydev, true);
void phy_stop(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
+ enum phy_state old_state;
if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
WARN(1, "called from state %s\n",
}
mutex_lock(&phydev->lock);
+ old_state = phydev->state;
if (phydev->state == PHY_CABLETEST) {
phy_abort_cable_test(phydev);
sfp_upstream_stop(phydev->sfp_bus);
phydev->state = PHY_HALTED;
+ phy_process_state_change(phydev, old_state);
mutex_unlock(&phydev->lock);
if (err < 0)
phy_error(phydev);
- if (old_state != phydev->state) {
- phydev_dbg(phydev, "PHY state change %s -> %s\n",
- phy_state_to_str(old_state),
- phy_state_to_str(phydev->state));
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
- }
+ phy_process_state_change(phydev, old_state);
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
break;
}
+ /* Force a poll to re-read the hardware signal state after
+ * sfp_sm_mod_probe() changed state_hw_mask.
+ */
+ mod_delayed_work(system_wq, &sfp->poll, 1);
+
err = sfp_hwmon_insert(sfp);
if (err)
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
static int lan87xx_read_status(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
+ int err;
- int err = genphy_read_status(phydev);
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
/* Disable EDPD to wake up PHY */
static int ax88772_init_mdio(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
+ int ret;
- priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
+ priv->mdio = mdiobus_alloc();
if (!priv->mdio)
return -ENOMEM;
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
- return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret);
+ mdiobus_free(priv->mdio);
+ priv->mdio = NULL;
+ }
+
+ return ret;
+}
+
+static void ax88772_mdio_unregister(struct asix_common_private *priv)
+{
+ mdiobus_unregister(priv->mdio);
+ mdiobus_free(priv->mdio);
}
static int ax88772_init_phy(struct usbnet *dev)
ret = ax88772_init_mdio(dev);
if (ret)
- return ret;
+ goto mdio_err;
ret = ax88772_phylink_setup(dev);
if (ret)
- return ret;
+ goto phylink_err;
ret = ax88772_init_phy(dev);
if (ret)
- phylink_destroy(priv->phylink);
+ goto initphy_err;
+ return 0;
+
+initphy_err:
+ phylink_destroy(priv->phylink);
+phylink_err:
+ ax88772_mdio_unregister(priv);
+mdio_err:
return ret;
}
phylink_disconnect_phy(priv->phylink);
rtnl_unlock();
phylink_destroy(priv->phylink);
+ ax88772_mdio_unregister(priv);
asix_rx_fixup_common_free(dev->driver_priv);
}
size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x", rx_cmd_a);
} else {
- u32 frame_len = size - ETH_FCS_LEN;
+ u32 frame_len;
struct sk_buff *skb2;
+ if (unlikely(size < ETH_FCS_LEN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
+ frame_len = size - ETH_FCS_LEN;
+
skb2 = napi_alloc_skb(&dev->napi, frame_len);
if (!skb2)
return 0;
}
static inline int
-pl_clear_QuickLink_features(struct usbnet *dev, int val)
-{
- return pl_vendor_req(dev, 1, (u8) val, 0);
-}
-
-static inline int
pl_set_QuickLink_features(struct usbnet *dev, int val)
{
return pl_vendor_req(dev, 3, (u8) val, 0);
size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x\n", rx_cmd_a);
size = (u16)((header & RX_STS_FL_) >> 16);
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
+ return 0;
+ }
+
if (unlikely(header & RX_STS_ES_)) {
netif_dbg(dev, rx_err, dev->net,
"Error header=0x%08x\n", header);
u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_shinfo(skb)->nr_frags) {
+ skb_shinfo(skb)->nr_frags ||
+ skb_headroom(skb) < XDP_PACKET_HEADROOM) {
u32 size, len, max_head_size, off;
struct sk_buff *nskb;
struct page *page;
consume_skb(skb);
skb = nskb;
- } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
- pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
- goto drop;
}
/* SKB "head" area always have tailroom for skb_shared_info */
return 0;
}
+static void veth_set_xdp_features(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ peer = rtnl_dereference(priv->peer);
+ if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+ xdp_features_t val = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
+ if (priv->_xdp_prog || veth_gro_requested(dev))
+ val |= NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+ xdp_set_features_flag(dev, val);
+ } else {
+ xdp_clear_features_flag(dev);
+ }
+}
+
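Summarizing the decision above, under the assumption that a peer exists and peer->real_num_tx_queues <= dev->real_num_rx_queues:

/* no XDP prog, no GRO:  BASIC | REDIRECT | RX_SG
 * XDP prog or GRO:      the above plus NDO_XMIT | NDO_XMIT_SG
 *
 * otherwise all XDP features are cleared. NDO_XMIT needs NAPI on this
 * veth, which runs once an XDP program is attached or GRO is requested;
 * hence the xdp_features_{set,clear}_redirect_target() calls in the
 * enable/disable paths below.
 */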
static int veth_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
if (peer)
netif_carrier_on(peer);
}
+
+ /* update XDP supported features */
+ veth_set_xdp_features(dev);
+ if (peer)
+ veth_set_xdp_features(peer);
+
return err;
revert:
err = veth_napi_enable(dev);
if (err)
return err;
+
+ xdp_features_set_redirect_target(dev, true);
} else {
+ xdp_features_clear_redirect_target(dev);
veth_napi_del(dev);
}
return 0;
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
+
+ xdp_features_set_redirect_target(dev, true);
}
if (old_prog) {
if (!prog) {
+ if (!veth_gro_requested(dev))
+ xdp_features_clear_redirect_target(dev);
+
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
return 0;
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = skb_get_hash(_ctx->skb);
return 0;
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
-
- dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
}
/*
goto err_queues;
veth_disable_gro(dev);
+ /* update XDP supported features */
+ veth_set_xdp_features(dev);
+ veth_set_xdp_features(peer);
+
return 0;
err_queues:
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
- unsigned int len, unsigned int truesize)
+ unsigned int len, unsigned int truesize,
+ unsigned int headroom)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- buf = p;
+ buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
- tailroom = truesize - hdr_padded_len - len;
+ tailroom = truesize - headroom - hdr_padded_len - len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return skb;
}
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+{
+ unsigned int len;
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+ void *ptr;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(!is_xdp_frame(ptr))) {
+ struct sk_buff *skb = ptr;
+
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ bytes += xdp_get_frame_len(frame);
+ xdp_return_frame(frame);
+ }
+ packets++;
+ }
+
+ /* Avoid overhead when no packets have been processed;
+ * this happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
+ u64_stats_update_end(&sq->stats.syncp);
+}
+
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ /* If running out of space, stop queue to avoid getting packets that we
+ * are then unable to transmit.
+ * An alternative would be to force queuing layer to requeue the skb by
+ * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+ * returned in a normal path of operation: it means that driver is not
+ * maintaining the TX queue stop/start state properly, and causes
+ * the stack to do a non-trivial amount of useless work.
+ * Since most packets only take 1 or 2 ring slots, stopping the queue
+ * early means 16 slots are typically wasted.
+ */
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ netif_stop_subqueue(dev, qnum);
+ if (use_napi) {
+ if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ /* More just got used, free them then recheck. */
+ free_old_xmit_skbs(sq, false);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ netif_start_subqueue(dev, qnum);
+ virtqueue_disable_cb(sq->vq);
+ }
+ }
+ }
+}
+
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
}
ret = nxmit;
+ if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+ check_sq_full_and_disable(vi, dev, sq);
+
if (flags & XDP_XMIT_FLUSH) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
kicks = 1;
{
struct page *page = buf;
struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
switch (act) {
case XDP_PASS:
+ head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+ if (unlikely(!head_skb))
+ goto err_xdp_frags;
+
if (unlikely(xdp_page != page))
put_page(page);
- head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
rcu_read_unlock();
return head_skb;
case XDP_TX:
rcu_read_unlock();
skip_xdp:
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
return stats.packets;
}
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
-{
- unsigned int len;
- unsigned int packets = 0;
- unsigned int bytes = 0;
- void *ptr;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(!is_xdp_frame(ptr))) {
- struct sk_buff *skb = ptr;
-
- pr_debug("Sent skb %p\n", skb);
-
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- }
- packets++;
- }
-
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- return;
-
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
- u64_stats_update_end(&sq->stats.syncp);
-}
-
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
- return false;
- else if (q < vi->curr_queue_pairs)
- return true;
- else
- return false;
-}
-
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
nf_reset_ct(skb);
}
- /* If running out of space, stop queue to avoid getting packets that we
- * are then unable to transmit.
- * An alternative would be to force queuing layer to requeue the skb by
- * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
- * returned in a normal path of operation: it means that driver is not
- * maintaining the TX queue stop/start state properly, and causes
- * the stack to do a non-trivial amount of useless work.
- * Since most packets only take 1 or 2 ring slots, stopping the queue
- * early means 16 slots are typically wasted.
- */
- if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
- netif_stop_subqueue(dev, qnum);
- if (use_napi) {
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
- virtqueue_napi_schedule(&sq->napi, sq->vq);
- } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
- /* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq, false);
- if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
- netif_start_subqueue(dev, qnum);
- virtqueue_disable_cb(sq->vq);
- }
- }
- }
+ check_sq_full_and_disable(vi, dev, sq);
if (kick || netif_xmit_stopped(txq)) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
uhdlc_priv->dev = &pdev->dev;
uhdlc_priv->ut_info = ut_info;
- if (of_get_property(np, "fsl,tdm-interface", NULL))
- uhdlc_priv->tsa = 1;
-
- if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
- uhdlc_priv->loopback = 1;
-
- if (of_get_property(np, "fsl,hdlc-bus", NULL))
- uhdlc_priv->hdlc_bus = 1;
+ uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface");
+ uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback");
+ uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus");
if (uhdlc_priv->tsa == 1) {
utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
rcu_read_lock();
do {
- while (likely(!mvmtxq->stopped &&
+ while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+ &mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+ &mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- /*
- * Please note that racing is handled very carefully here:
- * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
- * deleted afterwards.
- * This means that if:
- * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
- * queue is allocated and we can TX.
- * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
- * a race, should defer the frame.
- * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
- * need to allocate the queue and defer the frame.
- * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
- * queue is already scheduled for allocation, no need to allocate,
- * should defer the frame.
- */
-
- /* If the queue is allocated TX and return. */
- if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
- /*
- * Check that list is empty to avoid a race where txq_id is
- * already updated, but the queue allocation work wasn't
- * finished
- */
- if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
- return;
-
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
iwl_mvm_mac_itxq_xmit(hw, txq);
return;
}
- /* The list is being deleted only after the queue is fully allocated. */
- if (!list_empty(&mvmtxq->list))
- return;
+ /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+ * to handle any packets we leave on the txq now
+ */
- list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
- schedule_work(&mvm->add_stream_wk);
+ spin_lock_bh(&mvm->add_stream_lock);
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (list_empty(&mvmtxq->list) &&
+ /* recheck under lock */
+ !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+ list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+ schedule_work(&mvm->add_stream_wk);
+ }
+ spin_unlock_bh(&mvm->add_stream_lock);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
struct list_head list;
u16 txq_id;
atomic_t tx_request;
- bool stopped;
+#define IWL_MVM_TXQ_STATE_STOP_FULL 0
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
+#define IWL_MVM_TXQ_STATE_READY 2
+ unsigned long state;
};
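Replacing the single `stopped` bool with bit numbers in an `unsigned long state` lets independent conditions (stopped-because-full, stopped-for-redirect, ready) be flipped atomically with set_bit()/clear_bit()/test_bit() and coexist without a dedicated lock. A compact userspace analog using C11 atomics (a sketch, not the driver's code):

#include <stdatomic.h>
#include <stdbool.h>

enum { STOP_FULL, STOP_REDIRECT, READY };	/* bit numbers, as in the patch */

static atomic_ulong state;

static inline void set_flag(int bit)   { atomic_fetch_or(&state, 1UL << bit); }
static inline void clear_flag(int bit) { atomic_fetch_and(&state, ~(1UL << bit)); }
static inline bool test_flag(int bit)  { return atomic_load(&state) & (1UL << bit); }

/* TX is allowed only when no stop reason is set and the queue is ready. */
static inline bool can_tx(void)
{
	return !test_flag(STOP_FULL) && !test_flag(STOP_REDIRECT) && test_flag(READY);
}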
static inline struct iwl_mvm_txq *
struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
};
struct work_struct add_stream_wk; /* To add streams to queues */
+ spinlock_t add_stream_lock;
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
INIT_LIST_HEAD(&mvm->add_stream_txqs);
+ spin_lock_init(&mvm->add_stream_lock);
init_waitqueue_head(&mvm->rx_sync_waitq);
txq = sta->txq[tid];
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- mvmtxq->stopped = !start;
+ if (start)
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ else
+ set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
- mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop the queue and wait for it to empty */
- txq->stopped = true;
+ set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
out:
/* Continue using the queue */
- txq->stopped = false;
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
return ret;
}
* a queue in the function itself.
*/
if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ spin_unlock_bh(&mvm->add_stream_lock);
continue;
}
- list_del_init(&mvmtxq->list);
+ /* now we're ready, any remaining races/concurrency will be
+ * handled in iwl_mvm_mac_itxq_xmit()
+ */
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_mac80211(sta->txq[i]);
+ spin_lock_bh(&mvm->add_stream_lock);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ spin_unlock_bh(&mvm->add_stream_lock);
}
}
.can_ext_scan = true,
};
-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
{ }
{"EXTLAST", NULL, 0, 0xFE},
};
-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8978" },
if (ret)
return ret;
+ set_bit(MT76_STATE_REGISTERED, &phy->state);
phy->dev->phys[phy->band_idx] = phy;
return 0;
{
struct mt76_dev *dev = phy->dev;
+ if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
+ return;
+
if (IS_ENABLED(CONFIG_MT76_LEDS))
mt76_led_cleanup(phy);
mt76_tx_status_check(dev, true);
return ret;
WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+ set_bit(MT76_STATE_REGISTERED, &phy->state);
sched_set_fifo_low(dev->tx_worker.task);
return 0;
{
struct ieee80211_hw *hw = dev->hw;
+ if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
+ return;
+
if (IS_ENABLED(CONFIG_MT76_LEDS))
mt76_led_cleanup(&dev->phy);
mt76_tx_status_check(dev, true);
enum {
MT76_STATE_INITIALIZED,
+ MT76_STATE_REGISTERED,
MT76_STATE_RUNNING,
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
{
+ if (!mt76_is_mmio(dev))
+ return 0;
+
if (!mtk_wed_device_active(&dev->mmio.wed))
return 0;
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
hw->max_tx_fragments = 4;
}
if (phy->mt76->cap.has_5ghz) {
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
IEEE80211_HT_MPDU_DENSITY_4;
if (is_mt7915(&dev->mt76)) {
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
} else {
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
/* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
}
+
+ if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
}
mt76_set_stream_caps(phy->mt76, true);
int sts = hweight8(phy->mt76->chainmask);
u8 c, sts_160 = sts;
- /* mt7915 doesn't support bw160 */
- if (is_mt7915(&dev->mt76))
- sts_160 = 0;
+ /* Can do 1/2 of STS in 160MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76)) {
+ if (!dev->dbdc_support)
+ sts_160 /= 2;
+ else
+ sts_160 = 0;
+ }
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
- u8 nss_160 = nss;
+ u8 nss_160;
- /* Can't do 160MHz with mt7915 */
- if (is_mt7915(&dev->mt76))
+ if (!is_mt7915(&dev->mt76))
+ nss_160 = nss;
+ else if (!dev->dbdc_support)
+ /* Can do 1/2 of NSS streams in 160MHz mode for mt7915 */
+ nss_160 = nss / 2;
+ else
+ /* Can't do 160MHz with mt7915 dbdc */
nss_160 = 0;
for (i = 0; i < 8; i++) {
dev_info(&spi->dev, "selected chip family is %s\n",
pdev_data->family->name);
- if (of_find_property(dt_node, "clock-xtal", NULL))
- pdev_data->ref_clock_xtal = true;
+ pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal");
/* optional clock frequency params */
of_property_read_u32(dt_node, "ref-clock-frequency",
print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
out->data, out->len, false);
+ arg.phy = phy;
init_completion(&arg.done);
cntx = phy->out_urb->context;
phy->out_urb->context = &arg;
void ndlc_remove(struct llt_ndlc *ndlc)
{
- st_nci_remove(ndlc->ndev);
-
/* cancel timers */
del_timer_sync(&ndlc->t1_timer);
del_timer_sync(&ndlc->t2_timer);
ndlc->t2_active = false;
ndlc->t1_active = false;
+ /* cancel work */
+ cancel_work_sync(&ndlc->sm_work);
+
+ st_nci_remove(ndlc->ndev);
skb_queue_purge(&ndlc->rcv_q);
skb_queue_purge(&ndlc->send_q);
range = page_address(ns->ctrl->discard_page);
}
- __rq_for_each_bio(bio, req) {
- u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
- u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
- if (n < segments) {
- range[n].cattr = cpu_to_le32(0);
- range[n].nlb = cpu_to_le32(nlb);
- range[n].slba = cpu_to_le64(slba);
+ if (queue_max_discard_segments(req->q) == 1) {
+ u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+ u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+ range[0].cattr = cpu_to_le32(0);
+ range[0].nlb = cpu_to_le32(nlb);
+ range[0].slba = cpu_to_le64(slba);
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+ if (n < segments) {
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ }
+ n++;
}
- n++;
}
if (WARN_ON_ONCE(n != segments)) {
else
ctrl->max_zeroes_sectors = 0;
- if (nvme_ctrl_limited_cns(ctrl))
+ if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ nvme_ctrl_limited_cns(ctrl))
return 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
- io_uring_cmd_done(ioucmd, status, result);
+ io_uring_cmd_done(ioucmd, status, result, issue_flags);
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd);
+ nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
- nvme_uring_task_meta_cb(ioucmd);
+ nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
return;
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
- nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
- blk_rq_bytes(rq) >> SECTOR_SHIFT,
- req_op(rq), jiffies);
+ nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
+ jiffies);
}
EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
- nvme_req(rq)->start_time);
+ blk_rq_bytes(rq) >> SECTOR_SHIFT,
+ nvme_req(rq)->start_time);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
nvme_dev_unmap(dev);
out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
+ nvme_put_ctrl(&dev->ctrl);
return result;
}
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+ return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+ /* use the pdu space in the back for the data pdu */
+ return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+ sizeof(struct nvme_tcp_data_pdu);
+}
+
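These accessors overlay the data PDU on the tail of the command PDU's allocation: a request never has both PDU types in flight at once, and the data PDU is the smaller of the two, so the back of the same buffer can be reused instead of allocating twice. The pointer arithmetic generalizes as below (hypothetical types and sizes, shown only to illustrate the overlay):

#include <stddef.h>

struct big_pdu   { char hdr[8]; char body[64]; };	/* hypothetical sizes */
struct small_pdu { char hdr[8]; char body[16]; };

/* Carve the smaller PDU out of the tail of the larger one's buffer;
 * valid only because the two are never in flight at the same time. */
static inline void *small_pdu_of(void *buf)
{
	return (char *)buf + sizeof(struct big_pdu) - sizeof(struct small_pdu);
}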
static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
if (nvme_is_fabrics(req->req.cmd))
static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
- struct nvme_tcp_data_pdu *data = req->pdu;
+ struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
u32 h2cdata_sent = req->pdu_len;
static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
bool inline_data = nvme_tcp_has_inline_data(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_data_pdu *pdu = req->pdu;
+ struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) - req->offset + hdgst;
int ret;
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
int qid = nvme_tcp_queue_id(req->queue);
struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
struct nvme_command *c = &pdu->cmd;
c->common.flags |= NVME_CMD_SGL_METABUF;
struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
struct nvme_tcp_queue *queue = req->queue;
u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
blk_status_t ret;
static int __init nvme_tcp_init_module(void)
{
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
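The BUILD_BUG_ON() block pins the on-wire PDU sizes at compile time, so an accidental layout change (a new field, changed padding) breaks the build instead of silently corrupting the NVMe/TCP stream. Outside the kernel the same guard is spelled with C11's _Static_assert; a sketch with a hypothetical wire struct:

#include <stdint.h>

/* Hypothetical wire-format header; the assert documents and enforces
 * the protocol-mandated size. */
struct wire_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t len;
	uint32_t seq;
};

_Static_assert(sizeof(struct wire_hdr) == 8, "wire_hdr must be 8 bytes");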
nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvme_tcp_wq)
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_sq *sq = req->sq;
+
__nvmet_req_complete(req, status);
- percpu_ref_put(&req->sq->ref);
+ percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
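Caching req->sq in a local before __nvmet_req_complete() matters because completing the request may free or recycle it; dereferencing req->sq afterwards would be a use-after-free. The shape of the hazard and of the fix, reduced to a toy example (all names hypothetical):

#include <stdlib.h>

struct refcounted { int refs; };
struct obj { struct refcounted *owner; };

static void ref_put(struct refcounted *r)
{
	if (--r->refs == 0)
		free(r);
}

/* Completing the object may free it, e.g. via a completion callback. */
static void complete_and_maybe_free(struct obj *o) { free(o); }

static void finish(struct obj *o)
{
	struct refcounted *owner = o->owner;	/* copy before 'o' can die */

	complete_and_maybe_free(o);		/* 'o' must not be touched now */
	ref_put(owner);				/* safe: uses the local copy */
}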
"#nvmem-cell-cells",
index, &cell_spec);
if (ret)
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOENT);
if (cell_spec.args_count > 1)
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
+{
+ struct pci_bus_resource *bus_res, *tmp;
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ if (bus->resource[i] == res) {
+ bus->resource[i] = NULL;
+ return;
+ }
+ }
+
+ list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
+ if (bus_res->res == res) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ return;
+ }
+ }
+}
+
void pci_bus_remove_resources(struct pci_bus *bus)
{
int i;
u_cmd.insize > EC_MAX_MSG_BYTES)
return -EINVAL;
- s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ continue;
ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
if (ret < 0)
return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
+ cancel_delayed_work_sync(&bdi->input_current_limit_work);
error = pm_runtime_resume_and_get(bdi->dev);
if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
port->psy_current_max = 0;
break;
default:
- dev_err(dev, "Port %d: default case!\n", port->port_number);
+ dev_dbg(dev, "Port %d: default case!\n", port->port_number);
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+ cancel_work_sync(&charger->otg_work);
power_supply_unregister(charger->battery);
power_supply_unregister(charger->usb);
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
- if (tmp < 0)
- tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
charger->res_div) / 1000;
/*
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
- if (tmp < 0)
- tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
bulk_reg, 2);
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- if (alua_rtpg_queue(pg, sdev, qdata, true))
+ if (alua_rtpg_queue(pg, sdev, qdata, true)) {
fn = NULL;
- else
+ } else {
+ kfree(qdata);
err = SCSI_DH_DEV_OFFLINED;
+ }
kref_put(&pg->kref, release_port_group);
out:
if (fn)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
- /* In case scsi_remove_host() has not been called. */
- scsi_proc_hostdir_rm(shost->hostt);
-
/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
#endif /*MPI3MR_H_INCLUDED*/
mpi3mr_print_ioc_info(mrioc);
- dprint_init(mrioc, "allocating config page buffers\n");
- mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
- MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
if (!mrioc->cfg_page) {
- retval = -1;
- goto out_failed_noretry;
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
}
- mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
-
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
- goto out_failed_noretry;
+ if (!mrioc->init_cmds.reply) {
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
+ }
}
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed_noretry;
+ if (!mrioc->chain_sgl_list) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
}
retval = mpi3mr_issue_iocinit(mrioc);
mrioc->admin_req_base, mrioc->admin_req_dma);
mrioc->admin_req_base = NULL;
}
-
+ if (mrioc->cfg_page) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+ mrioc->cfg_page, mrioc->cfg_page_dma);
+ mrioc->cfg_page = NULL;
+ }
if (mrioc->pel_seqnum_virt) {
dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
mrioc->pel_seqnum_virt = NULL;
}
+ kfree(mrioc->throttle_groups);
+ mrioc->throttle_groups = NULL;
+
kfree(mrioc->logdata_buf);
mrioc->logdata_buf = NULL;
struct workqueue_struct *wq;
unsigned long flags;
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_hba_port *port, *hba_port_next;
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
if (!shost)
return;
mpi3mr_free_mem(mrioc);
mpi3mr_cleanup_resources(mrioc);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ port, port->port_id);
+ list_del(&port->list);
+ kfree(port);
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->sas_hba.num_phys) {
+ kfree(mrioc->sas_hba.phy);
+ mrioc->sas_hba.phy = NULL;
+ mrioc->sas_hba.num_phys = 0;
+ }
+
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
#include "mpi3mr.h"
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_sas_node *sas_expander);
-
/**
* mpi3mr_post_transport_req - Issue transport requests and wait
* @mrioc: Adapter instance reference
*
* Return nothing.
*/
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
struct mpi3mr_sas_node *sas_expander)
{
struct mpi3mr_sas_port *mr_sas_port, *next;
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
- if ((sas_port_add(port))) {
+ if (!port || (sas_port_add(port))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
goto out_fail;
mpt3sas_port->remote_identify.sas_address;
}
+ if (!rphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_delete_port;
+ }
+
rphy->identify = mpt3sas_port->remote_identify;
if ((sas_rphy_add(rphy))) {
__FILE__, __LINE__, __func__);
sas_rphy_free(rphy);
rphy = NULL;
+ goto out_delete_port;
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
rphy_to_expander_device(rphy), hba_port->port_id);
return mpt3sas_port;
- out_fail:
+out_delete_port:
+ sas_port_delete(port);
+
+out_fail:
list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
port_siblings)
list_del(&mpt3sas_phy->port_siblings);
}
req->outstanding_cmds[index] = NULL;
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
return sp;
}
}
bsg_reply->reply_payload_rcv_len = 0;
- qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /*
+ * perform lockless completion during driver unload
+ */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
int result;
+ if (sdev->no_vpd_size)
+ return SCSI_DEFAULT_VPD_LEN;
+
/*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
- {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
else if (*bflags & BLIST_SKIP_VPD_PAGES)
sdev->skip_vpd_pages = 1;
+ if (*bflags & BLIST_NO_VPD_SIZE)
+ sdev->no_vpd_size = 1;
+
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
{ LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
{ LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
{ LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVPFW, 32, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUSS1, 33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUHWT, 36, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
};
static const struct llcc_slice_config sdm845_data[] = {
struct reserved_mem *rmem;
struct qcom_rmtfs_mem *rmtfs_mem;
u32 client_id;
- u32 num_vmids, vmid[NUM_MAX_VMIDS];
+ u32 vmid[NUM_MAX_VMIDS];
+ int num_vmids;
int ret, i;
rmem = of_reserved_mem_lookup(node);
}
num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
- if (num_vmids < 0) {
- dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", ret);
+ if (num_vmids == -EINVAL) {
+ /* qcom,vmid is optional */
+ num_vmids = 0;
+ } else if (num_vmids < 0) {
+ dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
goto remove_cdev;
} else if (num_vmids > NUM_MAX_VMIDS) {
dev_warn(&pdev->dev,
goto out;
}
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+ if (arg->ret != TEEC_SUCCESS) {
+ pr_err("open_session failed %d\n", arg->ret);
+ handle_unload_ta(ta_handle);
+ kref_put(&sess->refcount, destroy_session);
+ goto out;
+ }
+
/* Find an empty session index for the given TA */
spin_lock(&sess->lock);
i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
- if (i < TEE_NUM_SESSIONS)
+ if (i < TEE_NUM_SESSIONS) {
+ sess->session_info[i] = session_info;
+ set_session_id(ta_handle, i, &arg->session);
set_bit(i, sess->sess_mask);
+ }
spin_unlock(&sess->lock);
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
}
- /* Open session with loaded TA */
- handle_open_session(arg, &session_info, param);
- if (arg->ret != TEEC_SUCCESS) {
- pr_err("open_session failed %d\n", arg->ret);
- spin_lock(&sess->lock);
- clear_bit(i, sess->sess_mask);
- spin_unlock(&sess->lock);
- handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
- goto out;
- }
-
- sess->session_info[i] = session_info;
- set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
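The reorder above opens the session with the TA first and only then publishes a slot index, so a failed open no longer leaves a transiently reserved slot that a concurrent closer could observe. The slot table itself is the usual find-first-zero-bit allocator under a lock; a userspace analog (hypothetical, using a plain mutex in place of the spinlock):

#include <pthread.h>

#define NUM_SLOTS 32

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long slot_mask;		/* bit i set => slot i in use */

/* Returns a free slot index, or -1 if all are taken. */
static int slot_alloc(void)
{
	int i;

	pthread_mutex_lock(&slot_lock);
	for (i = 0; i < NUM_SLOTS; i++) {
		if (!(slot_mask & (1UL << i))) {
			slot_mask |= 1UL << i;
			break;
		}
	}
	pthread_mutex_unlock(&slot_lock);
	return i < NUM_SLOTS ? i : -1;
}

static void slot_free(int i)
{
	pthread_mutex_lock(&slot_lock);
	slot_mask &= ~(1UL << i);
	pthread_mutex_unlock(&slot_lock);
}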
struct thermal_instance *pos;
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
+ bool upper_no_limit;
int result;
if (trip >= tz->num_trips || trip < 0)
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
- upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
+
+ if (upper == THERMAL_NO_LIMIT) {
+ upper = cdev->max_state;
+ upper_no_limit = true;
+ } else {
+ upper_no_limit = false;
+ }
if (lower > upper || upper > cdev->max_state)
return -EINVAL;
dev->cdev = cdev;
dev->trip = trip;
dev->upper = upper;
+ dev->upper_no_limit = upper_no_limit;
dev->lower = lower;
dev->target = THERMAL_NO_TARGET;
dev->weight = weight;
}
EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
+static bool thermal_cooling_device_present(struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device *pos = NULL;
+
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ if (pos == cdev)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * thermal_cooling_device_update - Update a cooling device object
+ * @cdev: Target cooling device.
+ *
+ * Update @cdev to reflect a change of the underlying hardware or platform.
+ *
+ * Must be called when the maximum cooling state of @cdev becomes invalid and so
+ * its .get_max_state() callback needs to be run to produce the new maximum
+ * cooling state value.
+ */
+void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
+{
+ struct thermal_instance *ti;
+ unsigned long state;
+
+ if (IS_ERR_OR_NULL(cdev))
+ return;
+
+ /*
+ * Hold thermal_list_lock throughout the update to prevent the device
+ * from going away while being updated.
+ */
+ mutex_lock(&thermal_list_lock);
+
+ if (!thermal_cooling_device_present(cdev))
+ goto unlock_list;
+
+ /*
+ * Update under the cdev lock to prevent the state from being set beyond
+ * the new limit concurrently.
+ */
+ mutex_lock(&cdev->lock);
+
+ if (cdev->ops->get_max_state(cdev, &cdev->max_state))
+ goto unlock;
+
+ thermal_cooling_device_stats_reinit(cdev);
+
+ list_for_each_entry(ti, &cdev->thermal_instances, cdev_node) {
+ if (ti->upper == cdev->max_state)
+ continue;
+
+ if (ti->upper < cdev->max_state) {
+ if (ti->upper_no_limit)
+ ti->upper = cdev->max_state;
+
+ continue;
+ }
+
+ ti->upper = cdev->max_state;
+ if (ti->lower > ti->upper)
+ ti->lower = ti->upper;
+
+ if (ti->target == THERMAL_NO_TARGET)
+ continue;
+
+ if (ti->target > ti->upper)
+ ti->target = ti->upper;
+ }
+
+ if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state)
+ goto unlock;
+
+ thermal_cooling_device_stats_update(cdev, state);
+
+unlock:
+ mutex_unlock(&cdev->lock);
+
+unlock_list:
+ mutex_unlock(&thermal_list_lock);
+}
+EXPORT_SYMBOL_GPL(thermal_cooling_device_update);
+
static void __unbind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev)
{
int i;
const struct thermal_zone_params *tzp;
struct thermal_zone_device *tz;
- struct thermal_cooling_device *pos = NULL;
if (!cdev)
return;
mutex_lock(&thermal_list_lock);
- list_for_each_entry(pos, &thermal_cdev_list, node)
- if (pos == cdev)
- break;
- if (pos != cdev) {
- /* thermal cooling device not found */
+
+ if (!thermal_cooling_device_present(cdev)) {
mutex_unlock(&thermal_list_lock);
return;
}
+
list_del(&cdev->node);
/* Unbind all thermal zones associated with 'this' cdev */
struct thermal_trip trip;
result = thermal_zone_get_trip(tz, count, &trip);
- if (result)
+ if (result || !trip.temperature)
set_bit(count, &tz->trips_disabled);
}
struct list_head tz_node; /* node in tz->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
unsigned int weight; /* The weight of the cooling device */
+ bool upper_no_limit;
};
#define to_thermal_zone(_dev) \
void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev);
/* used only at binding time */
ssize_t trip_point_show(struct device *, struct device_attribute *, char *);
ssize_t weight_show(struct device *, struct device_attribute *, char *);
{
struct cooling_dev_stats *stats = cdev->stats;
+ lockdep_assert_held(&cdev->lock);
+
if (!stats)
return;
struct device_attribute *attr, char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
- int ret;
+ struct cooling_dev_stats *stats;
+ int ret = 0;
+
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
spin_lock(&stats->lock);
ret = sprintf(buf, "%u\n", stats->total_trans);
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return ret;
}
char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
+ struct cooling_dev_stats *stats;
ssize_t len = 0;
int i;
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
+
spin_lock(&stats->lock);
+
update_time_in_state(stats);
for (i = 0; i <= cdev->max_state; i++) {
}
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return len;
}
size_t count)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
- int i, states = cdev->max_state + 1;
+ struct cooling_dev_stats *stats;
+ int i, states;
+
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
+
+ states = cdev->max_state + 1;
spin_lock(&stats->lock);
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return count;
}
struct device_attribute *attr, char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
+ struct cooling_dev_stats *stats;
ssize_t len = 0;
int i, j;
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats) {
+ len = -ENODATA;
+ goto unlock;
+ }
+
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i <= cdev->max_state; i++) {
break;
len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+ if (len >= PAGE_SIZE) {
+ len = PAGE_SIZE;
+ goto unlock;
+ }
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
if (len >= PAGE_SIZE) {
pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n");
- return -EFBIG;
+ len = -EFBIG;
}
+
+unlock:
+ mutex_unlock(&cdev->lock);
+
return len;
}
unsigned long states = cdev->max_state + 1;
int var;
+ lockdep_assert_held(&cdev->lock);
+
var = sizeof(*stats);
var += sizeof(*stats->time_in_state) * states;
var += sizeof(*stats->trans_table) * states * states;
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
{
+ lockdep_assert_held(&cdev->lock);
+
kfree(cdev->stats);
cdev->stats = NULL;
}
cooling_device_stats_destroy(cdev);
}
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
+{
+ cooling_device_stats_destroy(cdev);
+ cooling_device_stats_setup(cdev);
+}
+
/* these helper will be used only at the time of bindig */
ssize_t
trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
- debugfs_remove_recursive(debugfs_lookup("margining", parent));
+ if (parent)
+ debugfs_remove_recursive(debugfs_lookup("margining", parent));
kfree(port->usb4->margining);
port->usb4->margining = NULL;
static void margining_switch_remove(struct tb_switch *sw)
{
+ struct tb_port *upstream, *downstream;
struct tb_switch *parent_sw;
- struct tb_port *downstream;
u64 route = tb_route(sw);
if (!route)
return;
- /*
- * Upstream is removed with the router itself but we need to
- * remove the downstream port margining directory.
- */
+ upstream = tb_upstream_port(sw);
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
+
+ margining_port_remove(upstream);
margining_port_remove(downstream);
}
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
{
int reg = REG_RING_INTERRUPT_BASE +
ring_interrupt_index(ring) / 32 * 4;
- int bit = ring_interrupt_index(ring) & 31;
- int mask = 1 << bit;
+ int interrupt_bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << interrupt_bit;
u32 old, new;
if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
+ int auto_clear_bit;
int index;
if (ring->is_tx)
else
index = ring->hop + ring->nhi->hop_count;
- if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
- /*
- * Ask the hardware to clear interrupt status
- * bits automatically since we already know
- * which interrupt was triggered.
- */
- misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
- if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
- misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
- iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
- }
- }
+ /*
+ * Intel routers support a bit that isn't part of
+ * the USB4 spec to ask the hardware to clear
+ * interrupt status bits automatically since
+ * we already know which interrupt was triggered.
+ *
+ * Other routers explicitly disable auto-clear
+ * to avoid a condition where two MSI-X
+ * interrupts are simultaneously active and
+ * reading the register clears both of them.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+ else
+ auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+ if (!(misc & auto_clear_bit))
+ iowrite32(misc | auto_clear_bit,
+ ring->nhi->iobase + REG_DMA_MISC);
ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
static void ring_clear_msix(const struct tb_ring *ring)
{
+ int bit;
+
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
+ bit = ring_interrupt_index(ring) & 31;
if (ring->is_tx)
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
else
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
- 4 * (ring->nhi->hop_count / 32));
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+ 4 * (ring->nhi->hop_count / 32));
}
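The change in ring_clear_msix() swaps a read-to-clear acknowledgment for a write-1-to-clear one: reading REG_RING_NOTIFY_BASE wipes every ring's pending bit in that register, losing other rings' interrupts, while writing just this ring's bit to REG_RING_INT_CLEAR acknowledges it alone. The two MMIO idioms side by side (a sketch with hypothetical helpers standing in for ioread32()/iowrite32()):

#include <stdint.h>

/* Hypothetical MMIO accessors. */
static inline uint32_t mmio_read32(volatile void *addr)
{
	return *(volatile uint32_t *)addr;
}

static inline void mmio_write32(volatile void *addr, uint32_t val)
{
	*(volatile uint32_t *)addr = val;
}

/* Read-to-clear: one read wipes ALL pending bits, including other users'. */
static inline void ack_all(volatile void *status_reg)
{
	(void)mmio_read32(status_reg);
}

/* Write-1-to-clear: only the bits written as 1 are acknowledged. */
static inline void ack_one(volatile void *clear_reg, unsigned int bit)
{
	mmio_write32(clear_reg, 1U << bit);
}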
static irqreturn_t ring_msix(int irq, void *data)
/*
* three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
#define REG_RING_NOTIFY_BASE 0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR 0x37808
/*
* two bitfields: rx, tx
#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)
#define REG_INMAIL_DATA 0x39900
}
}
+static void quirk_clx_disable(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_NO_CLX;
+ tb_sw_dbg(sw, "disabling CL states\n");
+}
+
+static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_usb3_down(port))
+ continue;
+ port->max_bw = 16376;
+ tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
+ port->max_bw);
+ }
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
* DP buffers.
*/
{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
+ /*
+ * Limit the maximum USB3 bandwidth for the following Intel USB4
+ * host routers due to a hardware issue.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ /*
+ * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ */
+ { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
};
/**
return ret;
}
+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+ usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
rt->auth_status = 0;
if (val) {
+ tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
} else {
}
exit_unlock:
+ tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
* Enable sideband channel for each retimer. We can do this
* regardless whether there is device connected or not.
*/
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
- usb4_port_retimer_set_inbound_sbtx(port, i);
+ tb_retimer_set_inbound_sbtx(port);
/*
* Before doing anything else, read the authentication status.
break;
}
+ tb_retimer_unset_inbound_sbtx(port);
+
if (!last_idx)
return 0;
USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */
USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */
USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
- tb_check_quirks(sw);
-
ret = tb_switch_set_uuid(sw);
if (ret) {
dev_err(&sw->dev, "failed to set UUID\n");
}
}
+ tb_check_quirks(sw);
+
tb_switch_default_link_ports(sw);
ret = tb_switch_update_link_attributes(sw);
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
+/* Disable CLx if not supported */
+#define QUIRK_NO_CLX BIT(1)
+
/**
* struct tb_nvm - Structure holding NVM information
* @dev: Owner of the NVM
* @group: Bandwidth allocation group the adapter is assigned to. Only
* used for DP IN adapters for now.
* @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ * non-zero.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
unsigned int dma_credits;
struct tb_bandwidth_group *group;
struct list_head group_list;
+ unsigned int max_bw;
};
/**
*/
static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
{
+ if (sw->quirks & QUIRK_NO_CLX)
+ return false;
+
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
-/* Keep link controller awake during update */
-#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
-
void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI
}
/**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transactions on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+ return usb4_port_retimer_op(port, index,
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
+/**
* usb4_port_retimer_read() - Read from retimer sideband registers
* @port: USB4 port
* @index: Retimer index
usb4_port_retimer_nvm_read_block, &info);
}
+static inline unsigned int
+usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
+{
+ /* Take the possible bandwidth limitation into account */
+ if (port->max_bw)
+ return min(bw, port->max_bw);
+ return bw;
+}
+
/**
* usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
* @port: USB3 adapter port
return ret;
lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
- return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+ ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+
+ return usb4_usb3_port_max_bandwidth(port, ret);
}
/**
return 0;
lr = val & ADP_USB3_CS_4_ALR_MASK;
- return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+ ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+
+ return usb4_usb3_port_max_bandwidth(port, ret);
}
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
int downstream_bw)
{
u32 val, ubw, dbw, scale;
- int ret;
+ int ret, max_bw;
- /* Read the used scale, hardware default is 0 */
- ret = tb_port_read(port, &scale, TB_CFG_PORT,
- port->cap_adap + ADP_USB3_CS_3, 1);
+ /* Figure out suitable scale */
+ scale = 0;
+ max_bw = max(upstream_bw, downstream_bw);
+ while (scale < 64) {
+ if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+ break;
+ scale++;
+ }
+
+ if (WARN_ON(scale >= 64))
+ return -EINVAL;
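The loop searches for the smallest scale whose converted bandwidth still fits the 12-bit register field (values below 4096); larger scales mean coarser units and smaller values. A self-contained sketch of that search, with a deliberately hypothetical converter in place of mbps_to_usb3_bw():

/* Hypothetical converter: units get coarser (values smaller) as scale grows. */
static unsigned int to_units(unsigned int mbps, unsigned int scale)
{
	return mbps >> scale;
}

static int pick_scale(unsigned int max_bw)
{
	unsigned int scale;

	for (scale = 0; scale < 64; scale++)
		if (to_units(max_bw, scale) < 4096)
			return scale;	/* fits the 12-bit field */
	return -1;			/* cannot be represented */
}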
+
+ ret = tb_port_write(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;
- scale &= ADP_USB3_CS_3_SCALE_MASK;
ubw = mbps_to_usb3_bw(upstream_bw, scale);
dbw = mbps_to_usb3_bw(downstream_bw, scale);
+ tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
int irq;
int vtermno;
grant_ref_t gntref;
+ spinlock_t ring_lock;
};
static LIST_HEAD(xenconsoles);
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
int sent = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
if ((prod - cons) > sizeof(intf->out)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
if (sent)
notify_daemon(xencons);
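The `(prod - cons) > sizeof(intf->out)` check relies on unsigned wrap-around arithmetic: prod and cons are free-running indices that are only reduced modulo the ring size when indexing, so their difference is the fill level even after either counter wraps, and anything beyond the ring size signals a corrupted (possibly hostile) shared page. A minimal analog of the index discipline:

#include <stdint.h>
#include <stdbool.h>

#define RING_SIZE 1024u		/* power of two, like the console rings */

/* Free-running indices: fill level is prod - cons even across wrap. */
static inline uint32_t ring_fill(uint32_t prod, uint32_t cons)
{
	return prod - cons;
}

static inline bool ring_sane(uint32_t prod, uint32_t cons)
{
	return ring_fill(prod, cons) <= RING_SIZE;
}

static inline uint32_t ring_slot(uint32_t idx)
{
	return idx & (RING_SIZE - 1);	/* reduce only when indexing */
}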
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
+ unsigned long flags;
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
if ((prod - cons) > sizeof(intf->in)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
xencons->out_cons = intf->out_cons;
xencons->out_cons_same = 0;
}
+ if (!recv && xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
if (recv) {
notify_daemon(xencons);
- } else if (xencons->out_cons_same++ > 1) {
- eoiflag = XEN_EOI_FLAG_SPURIOUS;
}
xen_irq_lateeoi(xencons->irq, eoiflag);
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
} else if (info->intf != NULL) {
/* already configured */
return 0;
static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
{
+ spin_lock_init(&info->ring_lock);
info->evtchn = xen_start_info->console.domU.evtchn;
/* GFN == MFN for PV guest */
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
}
info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->vtermno = xenbus_devid_to_vtermno(devid);
if (!serdev)
continue;
- serdev->dev.of_node = node;
+ device_set_node(&serdev->dev, of_fwnode_handle(node));
err = serdev_device_add(serdev);
if (err) {
memset(&up, 0, sizeof(up));
up.port.mapbase = regs->start;
up.port.irq = irq;
- up.port.type = PORT_UNKNOWN;
- up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
+ up.port.type = PORT_16750;
+ up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
up.port.dev = &pdev->dev;
up.port.private_data = priv;
iir = port->serial_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- spin_unlock(&up->port.lock);
+ spin_unlock_irqrestore(&up->port.lock, flags);
return 0;
}
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
- spin_unlock(&up->port.lock);
+ spin_unlock_irqrestore(&up->port.lock, flags);
return 1;
}
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
- depends on REGMAP && MFD_SYSCON
+ depends on MFD_SYSCON
depends on ARCH_ASPEED || COMPILE_TEST
+ select REGMAP
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
tristate "Microchip 8250 based serial port"
depends on SERIAL_8250 && PCI
select SERIAL_8250_PCILIB
- default SERIAL_8250
help
Select this option if you have a setup with Microchip PCIe
Switch with serial port enabled and wish to enable 8250
config SERIAL_FSL_LPUART_CONSOLE
bool "Console on Freescale lpuart serial port"
- depends on SERIAL_FSL_LPUART
+ depends on SERIAL_FSL_LPUART=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
struct dma_chan *chan = sport->dma_rx_chan;
dmaengine_terminate_sync(chan);
+ del_timer_sync(&sport->lpuart_timer);
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
static void lpuart_dma_shutdown(struct lpuart_port *sport)
{
if (sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
sport->lpuart_dma_rx_use = false;
}
* Since the timer function acquires sport->port.lock, need to stop before
* acquiring the same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }
spin_lock_irqsave(&sport->port.lock, flags);
* Since the timer function acquires sport->port.lock, need to stop before
* acquiring the same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }
spin_lock_irqsave(&sport->port.lock, flags);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- /* wait transmit engin complete */
- lpuart32_write(&sport->port, 0, UARTMODIR);
- lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so skip waiting for transmission complete when UARTCTRL_SBK is
+ * asserted.
+ */
+ if (!(old_ctrl & UARTCTRL_SBK)) {
+ lpuart32_write(&sport->port, 0, UARTMODIR);
+ lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ }
/* disable transmit and receive */
lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
* cannot resume as expected, hence gracefully release the
* Rx DMA path before suspend and start Rx DMA path on resume.
*/
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
/* Disable Rx DMA to use UART port as wakeup source */
if (!qcom_geni_serial_main_active(uport))
return;
- if (port->rx_dma_addr) {
+ if (port->tx_dma_addr) {
geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr,
port->tx_remaining);
port->tx_dma_addr = 0;
if (port->tx_dma_addr)
return;
- xmit_size = uart_circ_chars_pending(xmit);
- if (xmit_size < WAKEUP_CHARS)
- uart_write_wakeup(uport);
+ if (uart_circ_empty(xmit))
+ return;
xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
disable_irq(uport->irq);
+
+ if (uart_console(uport))
+ return;
+
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
}
int c;
unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32;
+ if (vpitch > max_font_height)
+ return -EINVAL;
+
if (op->data) {
font.data = kvmalloc(max_font_size, GFP_KERNEL);
if (!font.data)
scaling->window_start_t = curr_t;
scaling->tot_busy_t = 0;
- if (hba->outstanding_reqs) {
+ if (scaling->active_reqs) {
scaling->busy_start_t = curr_t;
scaling->is_busy_started = true;
} else {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.active_reqs--;
- if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
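The invariant behind these two hunks, as a minimal sketch with illustrative names: a busy interval is open exactly while clk_scaling.active_reqs is non-zero, so opening and closing the interval must key off the same counter rather than hba->outstanding_reqs:

static void example_scaling_update(struct ufs_clk_scaling *scaling,
				   ktime_t now, bool request_in_flight)
{
	if (request_in_flight && !scaling->is_busy_started) {
		/* first request in the window opens a busy interval */
		scaling->busy_start_t = now;
		scaling->is_busy_started = true;
	} else if (!request_in_flight && scaling->is_busy_started) {
		/* last request completing closes it and accumulates */
		scaling->tot_busy_t +=
			ktime_to_us(ktime_sub(now, scaling->busy_start_t));
		scaling->is_busy_started = false;
	}
}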
return NULL;
}
+ if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ func->devfn != PCI_DEV_FN_OTG) {
+ return NULL;
+ }
+
return func;
}
case USB_REQ_SET_ISOCH_DELAY:
ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- /*
- * Add request into pending list to block sending status stage
- * by libcomposite.
- */
- list_add_tail(&pdev->ep0_preq.list,
- &pdev->ep0_preq.pep->pending_list);
-
- ret = cdnsp_ep0_delegate_req(pdev, ctrl);
- if (ret == -EBUSY)
- ret = 0;
-
- list_del(&pdev->ep0_preq.list);
- break;
default:
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
break;
else
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
- if (!len)
- pdev->ep0_stage = CDNSP_STATUS_STAGE;
-
if (ret == USB_GADGET_DELAYED_STATUS) {
trace_cdnsp_ep0_status_stage("delayed");
return;
out:
if (ret < 0)
cdnsp_ep0_stall(pdev);
- else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+ else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
cdnsp_status_stage(pdev);
}
#define PLAT_DRIVER_NAME "cdns-usbssp"
#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0100
+#define CDNS_DEVICE_ID 0x0200
+#define CDNS_DRD_ID 0x0100
#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
- struct pci_dev *func;
-
/*
* Gets the second function.
- * It's little tricky, but this platform has two function.
- * The fist keeps resources for Host/Device while the second
- * keeps resources for DRD/OTG.
+ * The platform has two functions. The first keeps resources for
+ * Host/Device while the second keeps resources for DRD/OTG.
*/
- func = pci_get_device(pdev->vendor, pdev->device, NULL);
- if (!func)
- return NULL;
+ if (pdev->device == CDNS_DEVICE_ID)
+ return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+ else if (pdev->device == CDNS_DRD_ID)
+ return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
- if (func->devfn == pdev->devfn) {
- func = pci_get_device(pdev->vendor, pdev->device, func);
- if (!func)
- return NULL;
- }
-
- return func;
+ return NULL;
}
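One point worth keeping in mind when reading the rewrite: pci_get_device() returns a referenced device. A hypothetical caller of cdnsp_get_second_fun() would therefore be expected to drop that reference when done:

struct pci_dev *func = cdnsp_get_second_fun(pdev);

if (func) {
	/* ... inspect or use the second function ... */
	pci_dev_put(func);	/* drop the reference pci_get_device() took */
}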
static int cdnsp_pci_probe(struct pci_dev *pdev,
PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
CDNS_DRD_IF, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
+ CDNS_DRD_IF, PCI_ANY_ID },
{ 0, }
};
* @in_lpm: if the core in low power mode
* @wakeup_int: if wakeup interrupt occur
* @rev: The revision number for controller
 * @mutex: protect code from concurrent running when doing role switch
*/
struct ci_hdrc {
struct device *dev;
bool in_lpm;
bool wakeup_int;
enum ci_revision rev;
+ struct mutex mutex;
};
static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
strlen(ci->roles[role]->name)))
break;
- if (role == CI_ROLE_END || role == ci->role)
+ if (role == CI_ROLE_END)
return -EINVAL;
+ mutex_lock(&ci->mutex);
+
+ if (role == ci->role) {
+ mutex_unlock(&ci->mutex);
+ return n;
+ }
+
pm_runtime_get_sync(dev);
disable_irq(ci->irq);
ci_role_stop(ci);
ci_handle_vbus_change(ci);
enable_irq(ci->irq);
pm_runtime_put_sync(dev);
+ mutex_unlock(&ci->mutex);
return (ret == 0) ? n : ret;
}
return -ENOMEM;
spin_lock_init(&ci->lock);
+ mutex_init(&ci->mutex);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
void ci_handle_id_switch(struct ci_hdrc *ci)
{
- enum ci_role role = ci_otg_role(ci);
+ enum ci_role role;
+ mutex_lock(&ci->mutex);
+ role = ci_otg_role(ci);
if (role != ci->role) {
dev_dbg(ci->dev, "switching from %s to %s\n",
ci_role(ci)->name, ci->roles[role]->name);
if (role == CI_ROLE_GADGET)
ci_handle_vbus_change(ci);
}
+ mutex_unlock(&ci->mutex);
}
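The locking rule these hunks establish, sketched with error handling omitted: every role transition, whether it originates from a sysfs write or from an ID-pin event, runs under ci->mutex, so the two paths can no longer interleave:

mutex_lock(&ci->mutex);
if (role != ci->role) {
	ci_role_stop(ci);
	ci_role_start(ci, role);	/* illustrative; the real paths differ */
}
mutex_unlock(&ci->mutex);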
/**
* ci_otg_work - perform otg (vbus/id) event handle
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
+ dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
}
static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) {
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = dwc2_lowlevel_hw_enable(hsotg);
if (ret)
goto err;
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_peripheral(hsotg->uphy->otg, NULL);
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
return 0;
return 0;
}
-static void __dwc2_disable_regulators(void *data)
-{
- struct dwc2_hsotg *hsotg = data;
-
- regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-}
-
static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&pdev->dev,
- __dwc2_disable_regulators, hsotg);
- if (ret)
- return ret;
-
if (hsotg->clk) {
ret = clk_prepare_enable(hsotg->clk);
if (ret)
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);
- return 0;
+ return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
}
/**
dwc2_debugfs_init(hsotg);
/* Gadget code manages lowlevel hw on its own */
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
error:
- if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+ if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
return retval;
}
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
- * @resume-hs-terminations: Set if we enable quirk for fixing improper crc
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper crc
* generation after resume from suspend.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
*/
static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
{
+ struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
WARN_ON_ONCE(ret);
dep->resource_index = 0;
- if (!interrupt)
+ if (!interrupt) {
+ if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+ mdelay(1);
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
- else if (!ret)
+ } else if (!ret) {
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ }
dep->flags &= ~DWC3_EP_DELAY_STOP;
return ret;
* enabled, the EndTransfer command will have completed upon
* returning from this function.
*
- * This mode is NOT available on the DWC_usb31 IP.
+ * This mode is NOT available on the DWC_usb31 IP. In this
+ * case, if the IOC bit is not set, then delay by 1ms
+ * after issuing the EndTransfer command. This allows for the
+ * controller to handle the command completely before DWC3
+ * remove requests attempts to unmap USB request buffers.
*/
__dwc3_stop_active_transfer(dep, force, interrupt);
sizeof(url_descriptor->URL)
- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
- if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
- + landing_page_length)
- landing_page_length = ctrl->wLength
- - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
+ if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length)
+ landing_page_length = w_length
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
memcpy(url_descriptor->URL,
cdev->landing_page + landing_page_offset,
uac = g_audio->uac;
card = uac->card;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
kfree(uac->p_prm.reqs);
kfree(uac->c_prm.reqs);
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2514", .data = µchip_usb424_data, },
+ { .compatible = "usb424,2517", .data = µchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+ "JMicron",
+ "JMS583Gen 2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
"PNY",
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
const u32 *data, int cnt)
{
+ u32 vdo_hdr = port->vdo_data[0];
+
WARN_ON(!mutex_is_locked(&port->lock));
- /* Make sure we are not still processing a previous VDM packet */
- WARN_ON(port->vdm_state > VDM_STATE_DONE);
+ /* If is sending discover_identity, handle received message first */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
+ port->send_discover = true;
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ } else {
+ /* Make sure we are not still processing a previous VDM packet */
+ WARN_ON(port->vdm_state > VDM_STATE_DONE);
+ }
port->vdo_count = cnt + 1;
port->vdo_data[0] = header;
switch (PD_VDO_CMD(vdo_hdr)) {
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
- if (res == 0)
+ if (res == 0) {
port->send_discover = false;
- else if (res == -EAGAIN)
+ } else if (res == -EAGAIN) {
+ port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
SEND_DISCOVER_RETRY_MS);
+ }
break;
case CMD_DISCOVER_SVID:
res = tcpm_ams_start(port, DISCOVER_SVIDS);
unsigned long timeout;
port->vdm_retries = 0;
+ port->vdo_data[0] = 0;
port->vdm_state = VDM_STATE_BUSY;
timeout = vdm_ready_timeout(vdo_hdr);
mod_vdm_delayed_work(port, timeout);
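These hunks introduce a sentinel convention: port->vdo_data[0] holds the header of the VDM currently in flight and is cleared to 0 once the VDM is consumed or requeued, so the CMD_DISCOVER_IDENT test in tcpm_queue_vdm() cannot match stale data. A hedged helper expressing the same test (the name is hypothetical):

static bool example_discover_ident_in_flight(struct tcpm_port *port)
{
	u32 hdr = port->vdo_data[0];

	return hdr && PD_VDO_SVDM(hdr) &&
	       PD_VDO_CMD(hdr) == CMD_DISCOVER_IDENT;
}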
case SOFT_RESET:
port->message_id = 0;
port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
case SOFT_RESET_SEND:
port->message_id = 0;
port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
tcpm_set_state_cond(port, hard_reset_state(port), 0);
else
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case PR_SWAP_SNK_SRC_SINK_OFF:
+ /* will be source, remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
/*
* Prevent vbus discharge circuit from turning on during PR_SWAP
* as this is not a disconnect.
return NULL;
}
-static int ucsi_register_port(struct ucsi *ucsi, int index)
+static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
{
struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
struct usb_power_delivery_capabilities_desc pd_caps;
struct usb_power_delivery_capabilities *pd_cap;
- struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
enum usb_role u_role = USB_ROLE_NONE;
init_completion(&con->complete);
mutex_init(&con->lock);
INIT_LIST_HEAD(&con->partner_tasks);
- con->num = index + 1;
con->ucsi = ucsi;
cap->fwnode = ucsi_find_fwnode(con);
*/
static int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi_connector *con;
- u64 command;
+ struct ucsi_connector *con, *connector;
+ u64 command, ntfy;
int ret;
int i;
}
/* Enable basic notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
- sizeof(*ucsi->connector), GFP_KERNEL);
- if (!ucsi->connector) {
+ connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
ret = -ENOMEM;
goto err_reset;
}
/* Register all connectors */
for (i = 0; i < ucsi->cap.num_connectors; i++) {
- ret = ucsi_register_port(ucsi, i);
+ connector[i].num = i + 1;
+ ret = ucsi_register_port(ucsi, &connector[i]);
if (ret)
goto err_unregister;
}
/* Enable all notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
+ ucsi->connector = connector;
+ ucsi->ntfy = ntfy;
return 0;
err_unregister:
- for (con = ucsi->connector; con->port; con++) {
+ for (con = connector; con->port; con++) {
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
typec_unregister_port(con->port);
con->port = NULL;
}
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-
+ kfree(connector);
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
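The shape of this rework, reduced to a pattern (a sketch, not the full function): build the connector array on a local pointer, publish it to ucsi->connector only after every registration step has succeeded, and free the local copy on any error path, so readers never observe a half-initialized array:

connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector),
		    GFP_KERNEL);
if (!connector)
	return -ENOMEM;
/* ... register each port; on failure unwind and kfree(connector) ... */
ucsi->connector = connector;	/* publish last, after full success */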
if (ret)
goto out_clear_bit;
- if (!wait_for_completion_timeout(&ua->complete, HZ))
+ if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
ret = -ETIMEDOUT;
out_clear_bit:
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+ bool suspended;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
if (err)
goto err_mr;
- if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
goto err_mr;
restore_channels_info(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
+ ndev->mvdev.suspended = false;
ndev->cur_num_vqs = 0;
ndev->mvdev.cvq.received_desc = 0;
ndev->mvdev.cvq.completed_desc = 0;
struct mlx5_vdpa_virtqueue *mvq;
int i;
+ mlx5_vdpa_info(mvdev, "suspending device\n");
+
down_write(&ndev->reslock);
ndev->nb_registered = false;
mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
suspend_vq(ndev, mvq);
}
mlx5_vdpa_cvq_suspend(mvdev);
+ mvdev->suspended = true;
up_write(&ndev->reslock);
return 0;
}
(uintptr_t)vq->device_addr);
vq->vring.last_avail_idx = last_avail_idx;
+
+ /*
+ * Since vdpa_sim does not support receiving inflight descriptors as a
+ * destination of a migration, let's set both avail_idx and used_idx to
+ * the same value at vq start. This is how vhost-user works in a
+ * VHOST_SET_VRING_BASE call.
+ *
+ * Although the simple fix is to set last_used_idx at
+ * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
+ */
+ vq->vring.last_used_idx = last_avail_idx;
vq->vring.notify = vdpasim_vq_notify;
}
struct virtio_pci_modern_device *mdev = NULL;
mdev = vp_vdpa_mgtdev->mdev;
- vp_modern_remove(mdev);
vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+ vp_modern_remove(mdev);
kfree(vp_vdpa_mgtdev->mgtdev.id_table);
kfree(mdev);
kfree(vp_vdpa_mgtdev);
if (migf->pre_copy_initial_bytes > *pos) {
info.initial_bytes = migf->pre_copy_initial_bytes - *pos;
} else {
- buf = mlx5vf_get_data_buff_from_pos(migf, *pos, &end_of_data);
- if (buf) {
- info.dirty_bytes = buf->start_pos + buf->length - *pos;
- } else {
- if (!end_of_data) {
- ret = -EINVAL;
- goto err_migf_unlock;
- }
- info.dirty_bytes = inc_length;
- }
+ info.dirty_bytes = migf->max_pos - *pos;
+ if (!info.dirty_bytes)
+ end_of_data = true;
+ info.dirty_bytes += inc_length;
}
if (!end_of_data || !inc_length) {
err_attach:
iommu_domain_free(v->domain);
+ v->domain = NULL;
return ret;
}
vhost_vdpa_remove_as(v, asid);
}
+ vhost_vdpa_free_domain(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
}
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
- vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);
board->caps = CLCD_CAP_ALL;
board->check = clcdfb_check;
board->decode = clcdfb_decode;
- if (of_find_property(node, "memory-region", NULL)) {
+ if (of_property_present(node, "memory-region")) {
board->setup = clcdfb_of_vram_setup;
board->mmap = clcdfb_of_vram_mmap;
board->remove = clcdfb_of_vram_remove;
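For readers unfamiliar with the helper: of_find_property() returns a struct property pointer, while of_property_present() returns a bool, which reads better when only the property's existence matters. A minimal before/after sketch (the callee is illustrative):

/* before */
if (of_find_property(node, "memory-region", NULL))
	setup_vram();
/* after */
if (of_property_present(node, "memory-region"))
	setup_vram();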
u32 pixclock;
int screen_size, plane;
+ if (!var->pixclock)
+ return -EINVAL;
+
plane = fbdev->plane;
/* Make sure that the mode respect all LCD controller and
if (!par->regs)
goto out_release_fb;
- if (!of_find_property(dp, "width", NULL)) {
+ if (!of_property_present(dp, "width")) {
err = bw2_do_default_mode(par, info, &linebytes);
if (err)
goto out_unmap_regs;
cg3_blank(FB_BLANK_UNBLANK, info);
- if (!of_find_property(dp, "width", NULL)) {
+ if (!of_property_present(dp, "width")) {
err = cg3_do_default_mode(par);
if (err)
goto out_unmap_screen;
if (rc)
return rc;
- if (pci_enable_device(dp) < 0) {
+ rc = pci_enable_device(dp);
+ if (rc < 0) {
dev_err(&dp->dev, "Cannot enable PCI device\n");
goto err_out;
}
- if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+ if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
addr = pci_resource_start(dp, 0);
- if (addr == 0)
+ if (addr == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
p = framebuffer_alloc(0, &dp->dev);
if (p == NULL) {
init_chips(p, addr);
- if (register_framebuffer(p) < 0) {
+ rc = register_framebuffer(p);
+ if (rc < 0) {
dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
goto err_unmap;
}
info->fix.mmio_start = res->start;
info->fix.mmio_len = resource_size(res);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- info->screen_base = devm_ioremap_resource(dev, res);
+ info->screen_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(info->screen_base)) {
ret = PTR_ERR(info->screen_base);
goto out_fb_release;
struct inode *inode,
struct file *file)
{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
file->f_mapping->a_ops = &fb_deferred_io_aops;
+ fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
int i;
- BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
/* clear out the mapping that we setup */
page->mapping = NULL;
}
}
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ if (!--fbdefio->open_count)
+ fb_deferred_io_lastclose(info);
+}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
- fb_deferred_io_release(info);
+ fb_deferred_io_lastclose(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
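The refcount pattern added here, as a compact sketch: each open of the fbdev file bumps open_count, only the final release runs the lastclose teardown, and fb_deferred_io_cleanup() reuses the same helper for the driver-unload path:

static void example_open(struct fb_info *info)
{
	info->fbdefio->open_count++;		/* one count per open file */
}

static void example_release(struct fb_info *info)
{
	if (!--info->fbdefio->open_count)	/* last close tears down */
		fb_deferred_io_lastclose(info);
}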
static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
if (var->xres > 1920 || var->yres > 1440)
return -EINVAL;
dinfo = GET_DINFO(info);
+ if (!var->pixclock)
+ return -EINVAL;
+
/* update the pitch */
if (intelfbhw_validate_mode(dinfo, var) != 0)
return -EINVAL;
int pitch, err = 0;
NVTRACE_ENTER();
+ if (!var->pixclock)
+ return -EINVAL;
var->transp.offset = 0;
var->transp.length = 0;
int foreign_endian = 0;
#ifdef __BIG_ENDIAN
- if (of_get_property(dp, "little-endian", NULL))
+ if (of_property_read_bool(dp, "little-endian"))
foreign_endian = FBINFO_FOREIGN_ENDIAN;
#else
- if (of_get_property(dp, "big-endian", NULL))
+ if (of_property_read_bool(dp, "big-endian"))
foreign_endian = FBINFO_FOREIGN_ENDIAN;
#endif
lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
-lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP OSK board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- * Adapted for OSK by <dirk.behme@de.bosch.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <linux/soc/ti/omap1-io.h>
-#include <linux/soc/ti/omap1-mux.h>
-
-#include "omapfb.h"
-
-static int osk_panel_enable(struct lcd_panel *panel)
-{
- /* configure PWL pin */
- omap_cfg_reg(PWL);
-
- /* Enable PWL unit */
- omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
-
- /* Set PWL level */
- omap_writeb(0xFF, OMAP_PWL_ENABLE);
-
- /* set GPIO2 high (lcd power enabled) */
- gpio_set_value(2, 1);
-
- return 0;
-}
-
-static void osk_panel_disable(struct lcd_panel *panel)
-{
- /* Set PWL level to zero */
- omap_writeb(0x00, OMAP_PWL_ENABLE);
-
- /* Disable PWL unit */
- omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
-
- /* set GPIO2 low */
- gpio_set_value(2, 0);
-}
-
-static struct lcd_panel osk_panel = {
- .name = "osk",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 12500,
- .hsw = 40,
- .hfp = 40,
- .hbp = 72,
- .vsw = 1,
- .vfp = 1,
- .vbp = 0,
- .pcd = 12,
-
- .enable = osk_panel_enable,
- .disable = osk_panel_disable,
-};
-
-static int osk_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&osk_panel);
- return 0;
-}
-
-static struct platform_driver osk_panel_driver = {
- .probe = osk_panel_probe,
- .driver = {
- .name = "lcd_osk",
- },
-};
-
-module_platform_driver(osk_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board");
-MODULE_LICENSE("GPL");
var->yoffset = var->yres_virtual - var->yres;
if (plane->color_mode == OMAPFB_COLOR_RGB444) {
- var->red.offset = 8; var->red.length = 4;
- var->red.msb_right = 0;
- var->green.offset = 4; var->green.length = 4;
- var->green.msb_right = 0;
- var->blue.offset = 0; var->blue.length = 4;
- var->blue.msb_right = 0;
+ var->red.offset = 8;
+ var->red.length = 4;
+ var->red.msb_right = 0;
+ var->green.offset = 4;
+ var->green.length = 4;
+ var->green.msb_right = 0;
+ var->blue.offset = 0;
+ var->blue.length = 4;
+ var->blue.msb_right = 0;
} else {
- var->red.offset = 11; var->red.length = 5;
- var->red.msb_right = 0;
- var->green.offset = 5; var->green.length = 6;
- var->green.msb_right = 0;
- var->blue.offset = 0; var->blue.length = 5;
- var->blue.msb_right = 0;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->red.msb_right = 0;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->green.msb_right = 0;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->blue.msb_right = 0;
}
var->height = -1;
omapdss_walk_device(dss, true);
for_each_available_child_of_node(dss, child) {
- if (!of_find_property(child, "compatible", NULL))
+ if (!of_property_present(child, "compatible"))
continue;
omapdss_walk_device(child, true);
priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
/* handle IO resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio_base = devm_ioremap_resource(dev, r);
+ priv->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(priv->mmio_base))
return PTR_ERR(priv->mmio_base);
#if defined(CONFIG_OF)
#ifdef __BIG_ENDIAN
- if (of_get_property(info->dev->parent->of_node, "little-endian", NULL))
+ if (of_property_read_bool(info->dev->parent->of_node, "little-endian"))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#else
- if (of_get_property(info->dev->parent->of_node, "big-endian", NULL))
+ if (of_property_read_bool(info->dev->parent->of_node, "big-endian"))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#endif
#endif
/* ------------------- driver specific functions --------------------------- */
static int
+stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (var->xres != fb->info.var.xres ||
+ var->yres != fb->info.var.yres ||
+ var->bits_per_pixel != fb->info.var.bits_per_pixel)
+ return -EINVAL;
+
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->grayscale = fb->info.var.grayscale;
+ var->red.length = fb->info.var.red.length;
+ var->green.length = fb->info.var.green.length;
+ var->blue.length = fb->info.var.blue.length;
+
+ return 0;
+}
+
+static int
stifb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp, struct fb_info *info)
{
static const struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
+ .fb_check_var = stifb_check_var,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = stifb_fillrect,
struct stifb_info *fb;
struct fb_info *info;
unsigned long sti_rom_address;
+ char modestr[32];
char *dev_name;
int bpp, xres, yres;
info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->pseudo_palette = &fb->pseudo_palette;
+ scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
+ fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
+
/* This has to be done !!! */
if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
goto out_err1;
spin_lock_init(&par->lock);
- par->lowdepth =
- (of_find_property(dp, "tcx-8-bit", NULL) != NULL);
+ par->lowdepth = of_property_read_bool(dp, "tcx-8-bit");
sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
{
struct tga_par *par = (struct tga_par *)info->par;
+ if (!var->pixclock)
+ return -EINVAL;
+
if (par->tga_type == TGA_TYPE_8PLANE) {
if (var->bits_per_pixel != 8)
return -EINVAL;
static int wm8505fb_probe(struct platform_device *pdev)
{
struct wm8505fb_info *fbi;
- struct resource *res;
struct display_timings *disp_timing;
void *addr;
int ret;
addr = addr + sizeof(struct wm8505fb_info);
fbi->fb.pseudo_palette = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fbi->regbase = devm_ioremap_resource(&pdev->dev, res);
+ fbi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fbi->regbase))
return PTR_ERR(fbi->regbase);
if (drvdata->flags & BUS_ACCESS_FLAG) {
struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(drvdata->regs))
return PTR_ERR(drvdata->regs);
pdata.yvirt = prop[1];
}
- if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
- pdata.rotate_screen = 1;
+ pdata.rotate_screen = of_property_read_bool(pdev->dev.of_node, "rotate-display");
platform_set_drvdata(pdev, drvdata);
return xilinxfb_assign(pdev, drvdata, &pdata);
-
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Convert a logo in ASCII PNM format to C source suitable for inclusion in
* the Linux kernel
*
* (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
- *
- * --------------------------------------------------------------------------
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
*/
#include <ctype.h>
#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */
static const char *logo_types[LINUX_LOGO_GRAY256+1] = {
- [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
- [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
- [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
- [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
+ [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
+ [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
+ [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
+ [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
};
#define MAX_LINUX_LOGO_COLORS 224
struct color {
- unsigned char red;
- unsigned char green;
- unsigned char blue;
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
};
static const struct color clut_vga16[16] = {
- { 0x00, 0x00, 0x00 },
- { 0x00, 0x00, 0xaa },
- { 0x00, 0xaa, 0x00 },
- { 0x00, 0xaa, 0xaa },
- { 0xaa, 0x00, 0x00 },
- { 0xaa, 0x00, 0xaa },
- { 0xaa, 0x55, 0x00 },
- { 0xaa, 0xaa, 0xaa },
- { 0x55, 0x55, 0x55 },
- { 0x55, 0x55, 0xff },
- { 0x55, 0xff, 0x55 },
- { 0x55, 0xff, 0xff },
- { 0xff, 0x55, 0x55 },
- { 0xff, 0x55, 0xff },
- { 0xff, 0xff, 0x55 },
- { 0xff, 0xff, 0xff },
+ { 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0xaa },
+ { 0x00, 0xaa, 0x00 },
+ { 0x00, 0xaa, 0xaa },
+ { 0xaa, 0x00, 0x00 },
+ { 0xaa, 0x00, 0xaa },
+ { 0xaa, 0x55, 0x00 },
+ { 0xaa, 0xaa, 0xaa },
+ { 0x55, 0x55, 0x55 },
+ { 0x55, 0x55, 0xff },
+ { 0x55, 0xff, 0x55 },
+ { 0x55, 0xff, 0xff },
+ { 0xff, 0x55, 0x55 },
+ { 0xff, 0x55, 0xff },
+ { 0xff, 0xff, 0x55 },
+ { 0xff, 0xff, 0xff },
};
static int is_plain_pbm = 0;
static void die(const char *fmt, ...)
- __attribute__ ((noreturn)) __attribute ((format (printf, 1, 2)));
-static void usage(void) __attribute ((noreturn));
+__attribute__((noreturn)) __attribute((format (printf, 1, 2)));
+static void usage(void) __attribute((noreturn));
static unsigned int get_number(FILE *fp)
{
- int c, val;
-
- /* Skip leading whitespace */
- do {
- c = fgetc(fp);
- if (c == EOF)
- die("%s: end of file\n", filename);
- if (c == '#') {
- /* Ignore comments 'till end of line */
- do {
+ int c, val;
+
+ /* Skip leading whitespace */
+ do {
c = fgetc(fp);
if (c == EOF)
- die("%s: end of file\n", filename);
- } while (c != '\n');
+ die("%s: end of file\n", filename);
+ if (c == '#') {
+ /* Ignore comments 'till end of line */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ } while (c != '\n');
+ }
+ } while (isspace(c));
+
+ /* Parse decimal number */
+ val = 0;
+ while (isdigit(c)) {
+ val = 10*val+c-'0';
+		/* Some PBMs are 'broken'; GIMP for example exports a PBM without space
+		 * between the digits. This is OK because we know a PBM can only have a '1'
+		 * or a '0' for the digit.
+ */
+ if (is_plain_pbm)
+ break;
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
}
- } while (isspace(c));
-
- /* Parse decimal number */
- val = 0;
- while (isdigit(c)) {
- val = 10*val+c-'0';
- /* some PBM are 'broken'; GiMP for example exports a PBM without space
- * between the digits. This is Ok cause we know a PBM can only have a '1'
- * or a '0' for the digit. */
- if (is_plain_pbm)
- break;
- c = fgetc(fp);
- if (c == EOF)
- die("%s: end of file\n", filename);
- }
- return val;
+ return val;
}
static unsigned int get_number255(FILE *fp, unsigned int maxval)
{
- unsigned int val = get_number(fp);
- return (255*val+maxval/2)/maxval;
+ unsigned int val = get_number(fp);
+
+ return (255*val+maxval/2)/maxval;
}
static void read_image(void)
{
- FILE *fp;
- unsigned int i, j;
- int magic;
- unsigned int maxval;
-
- /* open image file */
- fp = fopen(filename, "r");
- if (!fp)
- die("Cannot open file %s: %s\n", filename, strerror(errno));
-
- /* check file type and read file header */
- magic = fgetc(fp);
- if (magic != 'P')
- die("%s is not a PNM file\n", filename);
- magic = fgetc(fp);
- switch (magic) {
+ FILE *fp;
+ unsigned int i, j;
+ int magic;
+ unsigned int maxval;
+
+ /* open image file */
+ fp = fopen(filename, "r");
+ if (!fp)
+ die("Cannot open file %s: %s\n", filename, strerror(errno));
+
+ /* check file type and read file header */
+ magic = fgetc(fp);
+ if (magic != 'P')
+ die("%s is not a PNM file\n", filename);
+ magic = fgetc(fp);
+ switch (magic) {
case '1':
case '2':
case '3':
- /* Plain PBM/PGM/PPM */
- break;
+ /* Plain PBM/PGM/PPM */
+ break;
case '4':
case '5':
case '6':
- /* Binary PBM/PGM/PPM */
- die("%s: Binary PNM is not supported\n"
+ /* Binary PBM/PGM/PPM */
+ die("%s: Binary PNM is not supported\n"
"Use pnmnoraw(1) to convert it to ASCII PNM\n", filename);
default:
- die("%s is not a PNM file\n", filename);
- }
- logo_width = get_number(fp);
- logo_height = get_number(fp);
-
- /* allocate image data */
- logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
- if (!logo_data)
- die("%s\n", strerror(errno));
- for (i = 0; i < logo_height; i++) {
- logo_data[i] = malloc(logo_width*sizeof(struct color));
+ die("%s is not a PNM file\n", filename);
+ }
+ logo_width = get_number(fp);
+ logo_height = get_number(fp);
+
+ /* allocate image data */
+ logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
+ if (!logo_data)
+ die("%s\n", strerror(errno));
+ for (i = 0; i < logo_height; i++) {
+ logo_data[i] = malloc(logo_width*sizeof(struct color));
if (!logo_data[i])
- die("%s\n", strerror(errno));
- }
+ die("%s\n", strerror(errno));
+ }
- /* read image data */
- switch (magic) {
+ /* read image data */
+ switch (magic) {
case '1':
- /* Plain PBM */
- is_plain_pbm = 1;
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- logo_data[i][j].red = logo_data[i][j].green =
- logo_data[i][j].blue = 255*(1-get_number(fp));
- break;
+ /* Plain PBM */
+ is_plain_pbm = 1;
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = 255*(1-get_number(fp));
+ break;
case '2':
- /* Plain PGM */
- maxval = get_number(fp);
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- logo_data[i][j].red = logo_data[i][j].green =
- logo_data[i][j].blue = get_number255(fp, maxval);
- break;
+ /* Plain PGM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ break;
case '3':
- /* Plain PPM */
- maxval = get_number(fp);
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- logo_data[i][j].red = get_number255(fp, maxval);
- logo_data[i][j].green = get_number255(fp, maxval);
- logo_data[i][j].blue = get_number255(fp, maxval);
- }
- break;
- }
+ /* Plain PPM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ logo_data[i][j].red = get_number255(fp, maxval);
+ logo_data[i][j].green = get_number255(fp, maxval);
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ }
+ break;
+ }
- /* close file */
- fclose(fp);
+ /* close file */
+ fclose(fp);
}
static inline int is_black(struct color c)
{
- return c.red == 0 && c.green == 0 && c.blue == 0;
+ return c.red == 0 && c.green == 0 && c.blue == 0;
}
static inline int is_white(struct color c)
{
- return c.red == 255 && c.green == 255 && c.blue == 255;
+ return c.red == 255 && c.green == 255 && c.blue == 255;
}
static inline int is_gray(struct color c)
{
- return c.red == c.green && c.red == c.blue;
+ return c.red == c.green && c.red == c.blue;
}
static inline int is_equal(struct color c1, struct color c2)
{
- return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
+ return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
}
static void write_header(void)
{
- /* open logo file */
- if (outputname) {
- out = fopen(outputname, "w");
- if (!out)
- die("Cannot create file %s: %s\n", outputname, strerror(errno));
- } else {
- out = stdout;
- }
-
- fputs("/*\n", out);
- fputs(" * DO NOT EDIT THIS FILE!\n", out);
- fputs(" *\n", out);
- fprintf(out, " * It was automatically generated from %s\n", filename);
- fputs(" *\n", out);
- fprintf(out, " * Linux logo %s\n", logoname);
- fputs(" */\n\n", out);
- fputs("#include <linux/linux_logo.h>\n\n", out);
- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
- logoname);
+ /* open logo file */
+ if (outputname) {
+ out = fopen(outputname, "w");
+ if (!out)
+ die("Cannot create file %s: %s\n", outputname, strerror(errno));
+ } else {
+ out = stdout;
+ }
+
+ fputs("/*\n", out);
+ fputs(" * DO NOT EDIT THIS FILE!\n", out);
+ fputs(" *\n", out);
+ fprintf(out, " * It was automatically generated from %s\n", filename);
+ fputs(" *\n", out);
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+ logoname);
}
static void write_footer(void)
{
- fputs("\n};\n\n", out);
- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
- fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
- fprintf(out, "\t.width\t\t= %d,\n", logo_width);
- fprintf(out, "\t.height\t\t= %d,\n", logo_height);
- if (logo_type == LINUX_LOGO_CLUT224) {
- fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
- fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
- }
- fprintf(out, "\t.data\t\t= %s_data\n", logoname);
- fputs("};\n\n", out);
-
- /* close logo file */
- if (outputname)
- fclose(out);
+ fputs("\n};\n\n", out);
+ fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+ if (logo_type == LINUX_LOGO_CLUT224) {
+ fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+ fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
+ }
+ fprintf(out, "\t.data\t\t= %s_data\n", logoname);
+ fputs("};\n\n", out);
+
+ /* close logo file */
+ if (outputname)
+ fclose(out);
}
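Taken together, write_header(), write_hex() and write_footer() emit C source of the following shape. This example is hedged: it shows a hypothetical 2x1 clut224 logo with a two-entry palette (data bytes are palette index + 32), not output from a real image:

/*
 * DO NOT EDIT THIS FILE!
 *
 * It was automatically generated from logo.ppm
 *
 * Linux logo linux_logo
 */

#include <linux/linux_logo.h>

static unsigned char linux_logo_data[] __initdata = {
	0x20, 0x21
};

static unsigned char linux_logo_clut[] __initdata = {
	0x00, 0x00, 0x00, 0xff, 0xff, 0xff
};

const struct linux_logo linux_logo __initconst = {
	.type		= LINUX_LOGO_CLUT224,
	.width		= 2,
	.height		= 1,
	.clutsize	= 2,
	.clut		= linux_logo_clut,
	.data		= linux_logo_data
};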
static int write_hex_cnt;
static void write_hex(unsigned char byte)
{
- if (write_hex_cnt % 12)
- fprintf(out, ", 0x%02x", byte);
- else if (write_hex_cnt)
- fprintf(out, ",\n\t0x%02x", byte);
- else
- fprintf(out, "\t0x%02x", byte);
- write_hex_cnt++;
+ if (write_hex_cnt % 12)
+ fprintf(out, ", 0x%02x", byte);
+ else if (write_hex_cnt)
+ fprintf(out, ",\n\t0x%02x", byte);
+ else
+ fprintf(out, "\t0x%02x", byte);
+ write_hex_cnt++;
}
static void write_logo_mono(void)
{
- unsigned int i, j;
- unsigned char val, bit;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
- die("Image must be monochrome\n");
-
- /* write file header */
- write_header();
-
- /* write logo data */
- for (i = 0; i < logo_height; i++) {
- for (j = 0; j < logo_width;) {
- for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
- if (logo_data[i][j].red)
- val |= bit;
- write_hex(val);
+ unsigned int i, j;
+ unsigned char val, bit;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
+ die("Image must be monochrome\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++) {
+ for (j = 0; j < logo_width;) {
+ for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
+ if (logo_data[i][j].red)
+ val |= bit;
+ write_hex(val);
+ }
}
- }
- /* write logo structure and file footer */
- write_footer();
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_vga16(void)
{
- unsigned int i, j, k;
- unsigned char val;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- if (k == 16)
- die("Image must use the 16 console colors only\n"
- "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
- "of colors\n");
- }
+ unsigned int i, j, k;
+ unsigned char val;
- /* write file header */
- write_header();
-
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- val = k<<4;
- if (++j < logo_width) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- val |= k;
- }
- write_hex(val);
- }
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ if (k == 16)
+ die("Image must use the 16 console colors only\n"
+ "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
+ "of colors\n");
+ }
- /* write logo structure and file footer */
- write_footer();
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val = k<<4;
+ if (++j < logo_width) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val |= k;
+ }
+ write_hex(val);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_clut224(void)
{
- unsigned int i, j, k;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < logo_clutsize; k++)
- if (is_equal(logo_data[i][j], logo_clut[k]))
- break;
- if (k == logo_clutsize) {
- if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
- die("Image has more than %d colors\n"
- "Use ppmquant(1) to reduce the number of colors\n",
- MAX_LINUX_LOGO_COLORS);
- logo_clut[logo_clutsize++] = logo_data[i][j];
- }
- }
+ unsigned int i, j, k;
- /* write file header */
- write_header();
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ if (k == logo_clutsize) {
+ if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
+ die("Image has more than %d colors\n"
+ "Use ppmquant(1) to reduce the number of colors\n",
+ MAX_LINUX_LOGO_COLORS);
+ logo_clut[logo_clutsize++] = logo_data[i][j];
+ }
+ }
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < logo_clutsize; k++)
- if (is_equal(logo_data[i][j], logo_clut[k]))
- break;
- write_hex(k+32);
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ write_hex(k+32);
+ }
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+ write_hex(logo_clut[i].red);
+ write_hex(logo_clut[i].green);
+ write_hex(logo_clut[i].blue);
}
- fputs("\n};\n\n", out);
-
- /* write logo clut */
- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
- logoname);
- write_hex_cnt = 0;
- for (i = 0; i < logo_clutsize; i++) {
- write_hex(logo_clut[i].red);
- write_hex(logo_clut[i].green);
- write_hex(logo_clut[i].blue);
- }
-
- /* write logo structure and file footer */
- write_footer();
+
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_gray256(void)
{
- unsigned int i, j;
+ unsigned int i, j;
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- if (!is_gray(logo_data[i][j]))
- die("Image must be grayscale\n");
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_gray(logo_data[i][j]))
+ die("Image must be grayscale\n");
- /* write file header */
- write_header();
+ /* write file header */
+ write_header();
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- write_hex(logo_data[i][j].red);
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ write_hex(logo_data[i][j].red);
- /* write logo structure and file footer */
- write_footer();
+ /* write logo structure and file footer */
+ write_footer();
}
static void die(const char *fmt, ...)
{
- va_list ap;
+ va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
- exit(1);
+ exit(1);
}
static void usage(void)
{
- die("\n"
+ die("\n"
"Usage: %s [options] <filename>\n"
"\n"
"Valid options:\n"
- " -h : display this usage information\n"
- " -n <name> : specify logo name (default: linux_logo)\n"
- " -o <output> : output to file <output> instead of stdout\n"
- " -t <type> : specify logo type, one of\n"
- " mono : monochrome black/white\n"
- " vga16 : 16 colors VGA text palette\n"
- " clut224 : 224 colors (default)\n"
- " gray256 : 256 levels grayscale\n"
+ " -h : display this usage information\n"
+ " -n <name> : specify logo name (default: linux_logo)\n"
+ " -o <output> : output to file <output> instead of stdout\n"
+ " -t <type> : specify logo type, one of\n"
+ " mono : monochrome black/white\n"
+ " vga16 : 16 colors VGA text palette\n"
+ " clut224 : 224 colors (default)\n"
+ " gray256 : 256 levels grayscale\n"
"\n", programname);
}
int main(int argc, char *argv[])
{
- int opt;
+ int opt;
- programname = argv[0];
+ programname = argv[0];
- opterr = 0;
- while (1) {
- opt = getopt(argc, argv, "hn:o:t:");
- if (opt == -1)
- break;
+ opterr = 0;
+ while (1) {
+ opt = getopt(argc, argv, "hn:o:t:");
+ if (opt == -1)
+ break;
- switch (opt) {
- case 'h':
- usage();
- break;
+ switch (opt) {
+ case 'h':
+ usage();
+ break;
- case 'n':
- logoname = optarg;
- break;
+ case 'n':
+ logoname = optarg;
+ break;
- case 'o':
- outputname = optarg;
- break;
+ case 'o':
+ outputname = optarg;
+ break;
- case 't':
- if (!strcmp(optarg, "mono"))
- logo_type = LINUX_LOGO_MONO;
- else if (!strcmp(optarg, "vga16"))
- logo_type = LINUX_LOGO_VGA16;
- else if (!strcmp(optarg, "clut224"))
- logo_type = LINUX_LOGO_CLUT224;
- else if (!strcmp(optarg, "gray256"))
- logo_type = LINUX_LOGO_GRAY256;
- else
- usage();
- break;
+ case 't':
+ if (!strcmp(optarg, "mono"))
+ logo_type = LINUX_LOGO_MONO;
+ else if (!strcmp(optarg, "vga16"))
+ logo_type = LINUX_LOGO_VGA16;
+ else if (!strcmp(optarg, "clut224"))
+ logo_type = LINUX_LOGO_CLUT224;
+ else if (!strcmp(optarg, "gray256"))
+ logo_type = LINUX_LOGO_GRAY256;
+ else
+ usage();
+ break;
- default:
- usage();
- break;
+ default:
+ usage();
+ break;
+ }
}
- }
- if (optind != argc-1)
- usage();
+ if (optind != argc-1)
+ usage();
- filename = argv[optind];
+ filename = argv[optind];
- read_image();
- switch (logo_type) {
+ read_image();
+ switch (logo_type) {
case LINUX_LOGO_MONO:
- write_logo_mono();
- break;
+ write_logo_mono();
+ break;
case LINUX_LOGO_VGA16:
- write_logo_vga16();
- break;
+ write_logo_vga16();
+ break;
case LINUX_LOGO_CLUT224:
- write_logo_clut224();
- break;
+ write_logo_clut224();
+ break;
case LINUX_LOGO_GRAY256:
- write_logo_gray256();
- break;
- }
- exit(0);
+ write_logo_gray256();
+ break;
+ }
+ exit(0);
}
#define AAD_LEN 48
#define MSG_HDR_VER 1
+#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
+#define SNP_REQ_RETRY_DELAY (2*HZ)
+
struct snp_guest_crypto {
struct crypto_aead *tfm;
u8 *iv, *authtag;
return __enc_payload(snp_dev, req, payload, sz);
}
-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
- u8 type, void *req_buf, size_t req_sz, void *resp_buf,
- u32 resp_sz, __u64 *fw_err)
+static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
{
- unsigned long err;
- u64 seqno;
+ unsigned long err = 0xff, override_err = 0;
+ unsigned long req_start = jiffies;
+ unsigned int override_npages = 0;
int rc;
- /* Get message sequence and verify that its a non-zero */
- seqno = snp_get_msg_seqno(snp_dev);
- if (!seqno)
- return -EIO;
-
- memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
-
- /* Encrypt the userspace provided payload */
- rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
- if (rc)
- return rc;
-
+retry_request:
/*
* Call firmware to process the request. In this function the encrypted
* message enters shared memory with the host. So after this call the
* prevent reuse of the IV.
*/
rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+ switch (rc) {
+ case -ENOSPC:
+ /*
+ * If the extended guest request fails due to having too
+ * small of a certificate data buffer, retry the same
+ * guest request without the extended data request in
+ * order to increment the sequence number and thus avoid
+ * IV reuse.
+ */
+ override_npages = snp_dev->input.data_npages;
+ exit_code = SVM_VMGEXIT_GUEST_REQUEST;
- /*
- * If the extended guest request fails due to having too small of a
- * certificate data buffer, retry the same guest request without the
- * extended data request in order to increment the sequence number
- * and thus avoid IV reuse.
- */
- if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
- err == SNP_GUEST_REQ_INVALID_LEN) {
- const unsigned int certs_npages = snp_dev->input.data_npages;
-
- exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+ /*
+ * Override the error to inform callers the given extended
+ * request buffer size was too small and give the caller the
+ * required buffer size.
+ */
+ override_err = SNP_GUEST_REQ_INVALID_LEN;
/*
* If this call to the firmware succeeds, the sequence number can
* of the VMPCK and the error code being propagated back to the
* user as an ioctl() return code.
*/
- rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+ goto retry_request;
- /*
- * Override the error to inform callers the given extended
- * request buffer size was too small and give the caller the
- * required buffer size.
- */
- err = SNP_GUEST_REQ_INVALID_LEN;
- snp_dev->input.data_npages = certs_npages;
+ /*
+ * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
+ * throttled. Retry in the driver to avoid returning and reusing the
+ * message sequence number on a different message.
+ */
+ case -EAGAIN:
+ if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
+ goto retry_request;
}
/*
snp_inc_msg_seqno(snp_dev);
if (fw_err)
- *fw_err = err;
+ *fw_err = override_err ?: err;
+
+ if (override_npages)
+ snp_dev->input.data_npages = override_npages;
/*
* If an extended guest request was issued and the supplied certificate
* prevent IV reuse. If the standard request was successful, return -EIO
* back to the caller as would have originally been returned.
*/
- if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+ if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+ return -EIO;
+
+ return rc;
+}
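The retry policy above is an instance of a common jiffies-bounded loop; a minimal sketch, assuming an issue_request() stand-in for snp_issue_guest_request():

unsigned long req_start = jiffies;
int rc;

for (;;) {
	rc = issue_request();			/* hypothetical callee */
	if (rc != -EAGAIN)
		break;				/* done, or a hard error */
	if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
		rc = -ETIMEDOUT;		/* give up after 60 seconds */
		break;
	}
	schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
}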
+
+static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+ u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+ u32 resp_sz, __u64 *fw_err)
+{
+ u64 seqno;
+ int rc;
+
+	/* Get the message sequence counter and verify that it is non-zero */
+ seqno = snp_get_msg_seqno(snp_dev);
+ if (!seqno)
return -EIO;
+ memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+ /* Encrypt the userspace provided payload */
+ rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+ if (rc)
+ return rc;
+
+ rc = __handle_guest_request(snp_dev, exit_code, fw_err);
if (rc) {
- dev_alert(snp_dev->dev,
- "Detected error from ASP request. rc: %d, fw_err: %llu\n",
- rc, *fw_err);
- goto disable_vmpck;
+ if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+ return rc;
+
+ dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+ snp_disable_vmpck(snp_dev);
+ return rc;
}
rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
if (rc) {
- dev_alert(snp_dev->dev,
- "Detected unexpected decode failure from ASP. rc: %d\n",
- rc);
- goto disable_vmpck;
+ dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+ snp_disable_vmpck(snp_dev);
+ return rc;
}
return 0;
-
-disable_vmpck:
- snp_disable_vmpck(snp_dev);
- return rc;
}
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
void __iomem *mapping;
int ret;
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return -ENODEV;
+
if (!dev->platform_data)
return -ENODEV;
static void *xensyms_start(struct seq_file *m, loff_t *pos)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
xs->op.u.symdata.symnum = *pos;
static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
xs->op.u.symdata.symnum = ++(*pos);
static int xensyms_show(struct seq_file *m, void *p)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
struct xenpf_symdata *symdata = &xs->op.u.symdata;
seq_printf(m, "%016llx %c %s\n", symdata->address,
return ret;
m = file->private_data;
- xs = (struct xensyms *)m->private;
+ xs = m->private;
xs->namelen = XEN_KSYM_NAME_LEN + 1;
xs->name = kzalloc(xs->namelen, GFP_KERNEL);
static int xensyms_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
kfree(xs->name);
return seq_release_private(inode, file);
< block_group->zone_unusable);
WARN_ON(block_group->space_info->disk_total
< block_group->length * factor);
- WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
- &block_group->runtime_flags) &&
- block_group->space_info->active_total_bytes
- < block_group->length);
}
block_group->space_info->total_bytes -= block_group->length;
- if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
- block_group->space_info->active_total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
block_group->space_info->bytes_zone_unusable -=
spin_unlock(&info->delalloc_root_lock);
while (total) {
+ struct btrfs_space_info *space_info;
bool reclaim = false;
cache = btrfs_lookup_block_group(info, bytenr);
ret = -ENOENT;
break;
}
+ space_info = cache->space_info;
factor = btrfs_bg_type_to_factor(cache->flags);
/*
byte_in_group = bytenr - cache->start;
WARN_ON(byte_in_group > cache->length);
- spin_lock(&cache->space_info->lock);
+ spin_lock(&space_info->lock);
spin_lock(&cache->lock);
if (btrfs_test_opt(info, SPACE_CACHE) &&
old_val += num_bytes;
cache->used = old_val;
cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- cache->space_info->bytes_used += num_bytes;
- cache->space_info->disk_used += num_bytes * factor;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->bytes_used += num_bytes;
+ space_info->disk_used += num_bytes * factor;
spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ spin_unlock(&space_info->lock);
} else {
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info,
- cache->space_info, num_bytes);
- cache->space_info->bytes_used -= num_bytes;
- cache->space_info->disk_used -= num_bytes * factor;
+ btrfs_space_info_update_bytes_pinned(info, space_info,
+ num_bytes);
+ space_info->bytes_used -= num_bytes;
+ space_info->disk_used -= num_bytes * factor;
reclaim = should_reclaim_block_group(cache, num_bytes);
spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ spin_unlock(&space_info->lock);
set_extent_dirty(&trans->transaction->pinned_extents,
bytenr, bytenr + num_bytes - 1,
bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
spin_lock(&ctl->tree_lock);
+ /* Count initial region as zone_unusable until it gets activated. */
if (!used)
to_free = size;
+ else if (initial &&
+ test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+ (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+ to_free = 0;
else if (initial)
to_free = block_group->zone_capacity;
else if (offset >= block_group->alloc_offset)
reclaimable_unusable = block_group->zone_unusable -
(block_group->length - block_group->zone_capacity);
/* All the region is now unusable. Mark it as unused and reclaim */
- if (block_group->zone_unusable == block_group->length) {
+ if (block_group->zone_unusable == block_group->length &&
+ block_group->alloc_offset) {
btrfs_mark_bg_unused(block_group);
} else if (bg_reclaim_threshold &&
reclaimable_unusable >=
/* Indicate that we want to commit the transaction. */
BTRFS_FS_NEED_TRANS_COMMIT,
- /*
- * Indicate metadata over-commit is disabled. This is set when active
- * zone tracking is needed.
- */
- BTRFS_FS_NO_OVERCOMMIT,
+ /* This is set when active zone tracking is needed. */
+ BTRFS_FS_ACTIVE_ZONE_TRACKING,
/*
* Indicate if we have some features changed, this is mostly for
return -ENOMEM;
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
- if (ret)
+ if (ret < 0)
goto out;
+ /*
+ * fscrypt_setup_filename() should never return a positive value, but
+ * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
+ */
+ ASSERT(ret == 0);
/* This needs to handle no-key deletions later on */
ASSERT(found);
spin_lock(&found->lock);
found->total_bytes += block_group->length;
- if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
- found->active_total_bytes += block_group->length;
found->disk_total += block_group->length * factor;
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
return avail;
}
-static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
-{
- /*
- * On regular filesystem, all total_bytes are always writable. On zoned
- * filesystem, there may be a limitation imposed by max_active_zones.
- * For metadata allocation, we cannot finish an existing active block
- * group to avoid a deadlock. Thus, we need to consider only the active
- * groups to be writable for metadata space.
- */
- if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
- return space_info->total_bytes;
-
- return space_info->active_total_bytes;
-}
-
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
return 0;
used = btrfs_space_info_used(space_info, true);
- if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
+ if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
(space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
avail = 0;
else
avail = calc_available_free_space(fs_info, space_info, flush);
- if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
+ if (used + bytes < space_info->total_bytes + avail)
return 1;
return 0;
}
ticket = list_first_entry(head, struct reserve_ticket, list);
/* Check and see if our ticket can be satisfied now. */
- if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
+ if ((used + ticket->bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
flush)) {
btrfs_space_info_update_bytes_may_use(fs_info,
{
u64 used;
u64 avail;
- u64 total;
u64 to_reclaim = space_info->reclaim_size;
lockdep_assert_held(&space_info->lock);
* space. If that's the case add in our overage so we make sure to put
* appropriate pressure on the flushing state machine.
*/
- total = writable_total_bytes(fs_info, space_info);
- if (total + avail < used)
- to_reclaim += used - (total + avail);
+ if (space_info->total_bytes + avail < used)
+ to_reclaim += used - (space_info->total_bytes + avail);
return to_reclaim;
}
{
u64 global_rsv_size = fs_info->global_block_rsv.reserved;
u64 ordered, delalloc;
- u64 total = writable_total_bytes(fs_info, space_info);
u64 thresh;
u64 used;
- thresh = mult_perc(total, 90);
+ thresh = mult_perc(space_info->total_bytes, 90);
lockdep_assert_held(&space_info->lock);
BTRFS_RESERVE_FLUSH_ALL);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_readonly + global_rsv_size;
- if (used < total)
- thresh += total - used;
+ if (used < space_info->total_bytes)
+ thresh += space_info->total_bytes - used;
thresh >>= space_info->clamp;
used = space_info->bytes_pinned;
* can_overcommit() to ensure we can overcommit to continue.
*/
if (!pending_tickets &&
- ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
+ ((used + orig_bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);
*/
if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
used = btrfs_space_info_used(space_info, false);
- if (used + orig_bytes <=
- writable_total_bytes(fs_info, space_info)) {
+ if (used + orig_bytes <= space_info->total_bytes) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);
ret = 0;
u64 bytes_may_use; /* number of bytes that may be used for
delalloc/allocations */
u64 bytes_readonly; /* total bytes that are read only */
- /* Total bytes in the space, but only accounts active block groups. */
- u64 active_total_bytes;
u64 bytes_zone_unusable; /* total bytes that are unusable until
resetting the device zone */
ASSERT(op != BTRFS_MAP_DISCARD);
em = btrfs_get_chunk_map(fs_info, logical, *length);
- ASSERT(!IS_ERR(em));
+ if (IS_ERR(em))
+ return PTR_ERR(em);
map = em->map_lookup;
data_stripes = nr_data_stripes(map);
}
atomic_set(&zone_info->active_zones_left,
max_active_zones - nactive);
- /* Overcommit does not work well with active zone tacking. */
- set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
+ set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
}
/* Validate superblock log */
return;
WARN_ON(cache->bytes_super != 0);
- unusable = (cache->alloc_offset - cache->used) +
- (cache->length - cache->zone_capacity);
- free = cache->zone_capacity - cache->alloc_offset;
+
+ /* Check for block groups that were never activated */
+ if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+ cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+ !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+ cache->alloc_offset == 0) {
+ unusable = cache->length;
+ free = 0;
+ } else {
+ unusable = (cache->alloc_offset - cache->used) +
+ (cache->length - cache->zone_capacity);
+ free = cache->zone_capacity - cache->alloc_offset;
+ }
/* We only need ->free_space in ALLOC_SEQ block groups */
cache->cached = BTRFS_CACHE_FINISHED;
/* Successfully activated all the zones */
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
- space_info->active_total_bytes += block_group->length;
+ WARN_ON(block_group->alloc_offset != 0);
+ if (block_group->zone_unusable == block_group->length) {
+ block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+ space_info->bytes_zone_unusable -= block_group->zone_capacity;
+ }
spin_unlock(&block_group->lock);
btrfs_try_granting_tickets(fs_info, space_info);
spin_unlock(&space_info->lock);
if (!device->bdev)
continue;
- if (!zinfo->max_active_zones ||
- atomic_read(&zinfo->active_zones_left)) {
+ if (!zinfo->max_active_zones) {
ret = true;
break;
}
+
+ switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case 0: /* single */
+ ret = (atomic_read(&zinfo->active_zones_left) >= 1);
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+ ret = (atomic_read(&zinfo->active_zones_left) >= 2);
+ break;
+ }
+ if (ret)
+ break;
}
mutex_unlock(&fs_info->chunk_mutex);
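A hedged sketch of the activation check above: a single-profile block group needs one free active zone while DUP needs two (userspace analogue; names are illustrative):

#include <stdio.h>

enum bg_profile { BG_SINGLE, BG_DUP };

static int zones_needed(enum bg_profile profile)
{
	return profile == BG_DUP ? 2 : 1;
}

static int can_activate(enum bg_profile profile, int active_zones_left)
{
	return active_zones_left >= zones_needed(profile);
}

int main(void)
{
	/* One active zone left: enough for single, not for DUP. */
	printf("single:%d dup:%d\n",
	       can_activate(BG_SINGLE, 1), can_activate(BG_DUP, 1));
	return 0;
}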
u64 avail;
spin_lock(&block_group->lock);
- if (block_group->reserved ||
+ if (block_group->reserved || block_group->alloc_offset == 0 ||
(block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
spin_unlock(&block_group->lock);
continue;
if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
return 0;
- /* No more block groups to activate */
- if (space_info->active_total_bytes == space_info->total_bytes)
- return 0;
-
for (;;) {
int ret;
bool need_finish = false;
return dentry;
}
+static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
+ const char *path)
+{
+ size_t len = 0;
+
+ if (!*path)
+ return path;
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ cifs_sb->prepath) {
+ len = strlen(cifs_sb->prepath) + 1;
+ if (unlikely(len > strlen(path)))
+ return ERR_PTR(-EINVAL);
+ }
+ return path + len;
+}
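A userspace analogue of path_no_prefix() above, under the same assumption that the mount prefix is followed by exactly one separator character (illustrative only):

#include <stdio.h>
#include <string.h>

static const char *strip_prefix(const char *path, const char *prepath)
{
	size_t len = 0;

	if (!*path)
		return path;

	if (prepath) {
		len = strlen(prepath) + 1;	/* +1 for the separator */
		if (len > strlen(path))
			return NULL;		/* prefix longer than path */
	}
	return path + len;
}

int main(void)
{
	printf("%s\n", strip_prefix("prefix/dir/file", "prefix")); /* dir/file */
	return 0;
}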
+
/*
* Open and cache a directory handle.
* If error then *cfid is not initialized.
struct dentry *dentry = NULL;
struct cached_fid *cfid;
struct cached_fids *cfids;
+ const char *npath;
if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
is_smb1_server(tcon->ses->server))
}
/*
+ * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+ * calling ->lookup() which already adds those through
+ * build_path_from_dentry(). Also, do it earlier as we might reconnect
+ * below when trying to send a compounded request and then potentially
+ * having a different prefix path (e.g. after DFS failover).
+ */
+ npath = path_no_prefix(cifs_sb, path);
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+ kfree(utf16_path);
+ return rc;
+ }
+
+ /*
* We do not hold the lock for the open because in case
* SMB2_open needs to reconnect.
* This is safe because no other thread will be able to get a ref
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
(char *)&cfid->file_all_info))
cfid->file_all_info_is_valid = true;
- if (!path[0])
+ if (!npath[0])
dentry = dget(cifs_sb->root);
else {
- dentry = path_to_dentry(cifs_sb, path);
+ dentry = path_to_dentry(cifs_sb, npath);
if (IS_ERR(dentry)) {
rc = -ENOENT;
goto oshr_free;
seq_puts(m, "# Version:1\n");
seq_puts(m, "# Format:\n");
- seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
+ seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
#ifdef CONFIG_CIFS_DEBUG2
seq_printf(m, " <filename> <mid>\n");
#else
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
seq_printf(m,
- "0x%x 0x%llx 0x%x %d %d %d %pd",
+ "0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
tcon->tid,
+ ses->Suid,
cfile->fid.persistent_fid,
cfile->f_flags,
cfile->count,
{
struct mid_q_entry *mid_entry;
struct TCP_Server_Info *server;
+ struct TCP_Server_Info *chan_server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct cifs_server_iface *iface;
from_kuid(&init_user_ns, ses->linux_uid),
from_kuid(&init_user_ns, ses->cred_uid));
+ if (ses->dfs_root_ses) {
+ seq_printf(m, "\n\tDFS root session id: 0x%llx",
+ ses->dfs_root_ses->Suid);
+ }
+
spin_lock(&ses->chan_lock);
if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
seq_puts(m, "\tPrimary channel: DISCONNECTED ");
seq_puts(m, "\t\t[CONNECTED]\n");
}
spin_unlock(&ses->iface_lock);
+
+ seq_puts(m, "\n\n\tMIDs: ");
+ spin_lock(&ses->chan_lock);
+ for (j = 0; j < ses->chan_count; j++) {
+ chan_server = ses->chans[j].server;
+ if (!chan_server)
+ continue;
+
+ if (list_empty(&chan_server->pending_mid_q))
+ continue;
+
+ seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
+ chan_server->conn_id);
+ spin_lock(&chan_server->mid_lock);
+ list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
+ seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
+ mid_entry->mid_state,
+ le16_to_cpu(mid_entry->command),
+ mid_entry->pid,
+ mid_entry->callback_data,
+ mid_entry->mid);
+ }
+ spin_unlock(&chan_server->mid_lock);
+ }
+ spin_unlock(&ses->chan_lock);
+ seq_puts(m, "\n--\n");
}
if (i == 0)
seq_printf(m, "\n\t\t[NONE]");
-
- seq_puts(m, "\n\n\tMIDs: ");
- spin_lock(&server->mid_lock);
- list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
- seq_printf(m, "\n\tState: %d com: %d pid:"
- " %d cbdata: %p mid %llu\n",
- mid_entry->mid_state,
- le16_to_cpu(mid_entry->command),
- mid_entry->pid,
- mid_entry->callback_data,
- mid_entry->mid);
- }
- spin_unlock(&server->mid_lock);
- seq_printf(m, "\n--\n");
}
if (c == 0)
seq_printf(m, "\n\t[NONE]");
tmp.source = full_path;
tmp.leaf_fullpath = NULL;
tmp.UNC = tmp.prepath = NULL;
+ tmp.dfs_root_ses = NULL;
rc = smb3_fs_context_dup(ctx, &tmp);
if (rc) {
/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
char *prepath;
- /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
- uuid_t dfs_mount_id;
/*
* Indicate whether serverino option was turned off later
* (cifs_autodisable_serverino) in order to match new mounts.
spin_lock(&tcon->tc_lock);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
- already tried to force umount this and woken up
+ already tried to umount this and woken up
all waiting network requests, nothing to do */
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return;
- } else if (tcon->tc_count == 1)
- tcon->status = TID_EXITING;
+ }
+ /*
+ * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
+ * fail later (e.g. due to open files); TID_EXITING is set just before the tdis req is sent
+ */
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct list_head ulist; /* cache update list */
+ struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
};
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct cifs_ses *root_ses;
- uuid_t mount_id;
char *origin_fullpath, *leaf_fullpath;
+ struct list_head dfs_ses_list;
};
static inline void free_dfs_info_param(struct dfs_info3_param *param)
/*
* only tree disconnect, open, and write, (and ulogoff which does not
- * have tcon) are allowed as we start force umount
+ * have tcon) are allowed as we start umount
*/
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
- if (smb_command != SMB_COM_WRITE_ANDX &&
- smb_command != SMB_COM_OPEN_ANDX &&
- smb_command != SMB_COM_TREE_DISCONNECT) {
+ if (smb_command != SMB_COM_TREE_DISCONNECT) {
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb_command);
cifs_chan_update_iface(ses, server);
spin_lock(&ses->chan_lock);
- if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
- goto next_session;
+ if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+ continue;
+ }
if (mark_smb_session)
CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
else
cifs_chan_set_need_reconnect(ses, server);
+ cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+ __func__, ses->chans_need_reconnect);
+
/* If all channels need reconnect, then tcon needs reconnect */
- if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
- goto next_session;
+ if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+ spin_unlock(&ses->chan_lock);
+ continue;
+ }
+ spin_unlock(&ses->chan_lock);
+ spin_lock(&ses->ses_lock);
ses->ses_status = SES_NEED_RECON;
+ spin_unlock(&ses->ses_lock);
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
tcon->need_reconnect = true;
+ spin_lock(&tcon->tc_lock);
tcon->status = TID_NEED_RECON;
+ spin_unlock(&tcon->tc_lock);
}
if (ses->tcon_ipc) {
ses->tcon_ipc->need_reconnect = true;
+ spin_lock(&ses->tcon_ipc->tc_lock);
ses->tcon_ipc->status = TID_NEED_RECON;
+ spin_unlock(&ses->tcon_ipc->tc_lock);
}
-
-next_session:
- spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
}
return ERR_PTR(rc);
}
-/* this function must be called with ses_lock held */
+/* this function must be called with ses_lock and chan_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
if (ctx->sectype != Unspecified &&
* If an existing session is limited to less channels than
* requested, it should not be reused
*/
- spin_lock(&ses->chan_lock);
- if (ses->chan_max < ctx->max_channels) {
- spin_unlock(&ses->chan_lock);
+ if (ses->chan_max < ctx->max_channels)
return 0;
- }
- spin_unlock(&ses->chan_lock);
switch (ses->sectype) {
case Kerberos:
spin_unlock(&ses->ses_lock);
continue;
}
+ spin_lock(&ses->chan_lock);
if (!match_session(ses, ctx)) {
+ spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
continue;
}
+ spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
++ses->ses_count;
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
+ ses->dfs_root_ses = ctx->dfs_root_ses;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
WARN_ON(tcon->tc_count < 0);
list_del_init(&tcon->tcon_list);
+ tcon->status = TID_EXITING;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
spin_lock(&tcp_srv->srv_lock);
spin_lock(&ses->ses_lock);
+ spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
!match_session(ses, ctx) ||
rc = compare_mount_options(sb, mnt_data);
out:
spin_unlock(&tcon->tc_lock);
+ spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
spin_unlock(&tcp_srv->srv_lock);
bool isdfs;
int rc;
- uuid_gen(&mnt_ctx.mount_id);
+ INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
+
rc = dfs_mount_share(&mnt_ctx, &isdfs);
if (rc)
goto error;
kfree(cifs_sb->prepath);
cifs_sb->prepath = ctx->prepath;
ctx->prepath = NULL;
- uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
out:
cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
return rc;
error:
- dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
+ dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
kfree(mnt_ctx.origin_fullpath);
kfree(mnt_ctx.leaf_fullpath);
cifs_mount_put_conns(&mnt_ctx);
spin_unlock(&cifs_sb->tlink_tree_lock);
kfree(cifs_sb->prepath);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
-#endif
call_rcu(&cifs_sb->rcu, delayed_free);
}
/* only send once per connect */
spin_lock(&server->srv_lock);
- if (!server->ops->need_neg(server) ||
+ if (server->tcpStatus != CifsGood &&
+ server->tcpStatus != CifsNew &&
server->tcpStatus != CifsNeedNegotiate) {
spin_unlock(&server->srv_lock);
+ return -EHOSTDOWN;
+ }
+
+ if (!server->ops->need_neg(server) &&
+ server->tcpStatus == CifsGood) {
+ spin_unlock(&server->srv_lock);
return 0;
}
+
server->tcpStatus = CifsInNegotiate;
spin_unlock(&server->srv_lock);
bool is_binding = false;
spin_lock(&ses->ses_lock);
+ cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+ __func__, ses->chans_need_reconnect);
+
if (ses->ses_status != SES_GOOD &&
ses->ses_status != SES_NEW &&
ses->ses_status != SES_NEED_RECON) {
spin_unlock(&ses->ses_lock);
- return 0;
+ return -EHOSTDOWN;
}
/* only send once per connect */
spin_lock(&ses->chan_lock);
- if (CIFS_ALL_CHANS_GOOD(ses) ||
- cifs_chan_in_reconnect(ses, server)) {
+ if (CIFS_ALL_CHANS_GOOD(ses)) {
+ if (ses->ses_status == SES_NEED_RECON)
+ ses->ses_status = SES_GOOD;
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
return 0;
}
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+
cifs_chan_set_in_reconnect(ses, server);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
spin_unlock(&ses->chan_lock);
if (!is_binding)
/* only send once per connect */
spin_lock(&tcon->tc_lock);
- if (tcon->ses->ses_status != SES_GOOD ||
- (tcon->status != TID_NEW &&
- tcon->status != TID_NEED_TCON)) {
+ if (tcon->status != TID_NEW &&
+ tcon->status != TID_NEED_TCON) {
+ spin_unlock(&tcon->tc_lock);
+ return -EHOSTDOWN;
+ }
+
+ if (tcon->status == TID_GOOD) {
spin_unlock(&tcon->tc_lock);
return 0;
}
ctx->leaf_fullpath = (char *)full_path;
rc = cifs_mount_get_session(mnt_ctx);
ctx->leaf_fullpath = NULL;
- if (!rc) {
- struct cifs_ses *ses = mnt_ctx->ses;
- mutex_lock(&ses->session_mutex);
- ses->dfs_root_ses = mnt_ctx->root_ses;
- mutex_unlock(&ses->session_mutex);
- }
return rc;
}
-static void set_root_ses(struct cifs_mount_ctx *mnt_ctx)
+static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
- if (mnt_ctx->ses) {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_root_ses *root_ses;
+ struct cifs_ses *ses = mnt_ctx->ses;
+
+ if (ses) {
+ root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
+ if (!root_ses)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&root_ses->list);
+
spin_lock(&cifs_tcp_ses_lock);
- mnt_ctx->ses->ses_count++;
+ ses->ses_count++;
spin_unlock(&cifs_tcp_ses_lock);
- dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
+ root_ses->ses = ses;
+ list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
}
- mnt_ctx->root_ses = mnt_ctx->ses;
+ ctx->dfs_root_ses = ses;
+ return 0;
}
static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path,
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param ref = {};
- int rc;
+ bool is_refsrv = false;
+ int rc, rc2;
rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
if (rc)
if (rc)
goto out;
- if (ref.flags & DFSREF_REFERRAL_SERVER)
- set_root_ses(mnt_ctx);
+ is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER);
rc = -EREMOTE;
if (ref.flags & DFSREF_STORAGE_SERVER) {
goto out;
/* some servers may not advertise referral capability under ref.flags */
- if (!(ref.flags & DFSREF_REFERRAL_SERVER) &&
- is_tcon_dfs(mnt_ctx->tcon))
- set_root_ses(mnt_ctx);
+ is_refsrv |= is_tcon_dfs(mnt_ctx->tcon);
rc = cifs_is_path_remote(mnt_ctx);
}
+ if (rc == -EREMOTE && is_refsrv) {
+ rc2 = get_root_smb_session(mnt_ctx);
+ if (rc2)
+ rc = rc2;
+ }
+
out:
free_dfs_info_param(&ref);
return rc;
char *ref_path = NULL, *full_path = NULL;
struct dfs_cache_tgt_iterator *tit;
struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon;
char *origin_fullpath = NULL;
int num_links = 0;
int rc;
if (!rc) {
server = mnt_ctx->server;
+ tcon = mnt_ctx->tcon;
mutex_lock(&server->refpath_lock);
- server->origin_fullpath = origin_fullpath;
- server->current_fullpath = server->leaf_fullpath;
+ if (!server->origin_fullpath) {
+ server->origin_fullpath = origin_fullpath;
+ server->current_fullpath = server->leaf_fullpath;
+ origin_fullpath = NULL;
+ }
mutex_unlock(&server->refpath_lock);
- origin_fullpath = NULL;
+
+ if (list_empty(&tcon->dfs_ses_list)) {
+ list_replace_init(&mnt_ctx->dfs_ses_list,
+ &tcon->dfs_ses_list);
+ } else {
+ dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+ }
}
out:
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
- mnt_ctx->root_ses = mnt_ctx->ses;
+ ctx->dfs_root_ses = mnt_ctx->ses;
/*
* If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
* try to get an DFS referral (even cached) to determine whether it is an DFS mount.
}
*isdfs = true;
- set_root_ses(mnt_ctx);
+ rc = get_root_smb_session(mnt_ctx);
+ if (rc)
+ return rc;
return __dfs_mount_share(mnt_ctx);
}
/* only send once per connect */
spin_lock(&tcon->tc_lock);
- if (tcon->ses->ses_status != SES_GOOD ||
- (tcon->status != TID_NEW &&
- tcon->status != TID_NEED_TCON)) {
+ if (tcon->status != TID_NEW &&
+ tcon->status != TID_NEED_TCON) {
+ spin_unlock(&tcon->tc_lock);
+ return -EHOSTDOWN;
+ }
+
+ if (tcon->status == TID_GOOD) {
spin_unlock(&tcon->tc_lock);
return 0;
}
#include "fs_context.h"
#include "cifs_unicode.h"
+struct dfs_root_ses {
+ struct list_head list;
+ struct cifs_ses *ses;
+};
+
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path,
struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)
{
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
- return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+ return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
cifs_remap(cifs_sb), path, ref, tl);
}
true);
}
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
+{
+ struct dfs_root_ses *root, *tmp;
+
+ list_for_each_entry_safe(root, tmp, head, list) {
+ list_del_init(&root->list);
+ cifs_put_smb_ses(root->ses);
+ kfree(root);
+ }
+}
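The helper above is the usual safe-iteration teardown: unlink each entry, drop its reference, then free it. A minimal userspace analogue with a hand-rolled list (not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static void put_all(struct node **head)
{
	struct node *n = *head;

	while (n) {
		struct node *next = n->next;	/* save before freeing */

		free(n);
		n = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		head = n;
	}
	put_all(&head);
	puts("done");
	return 0;
}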
+
#endif /* _CIFS_DFS_H */
struct cache_dfs_tgt *tgthint;
};
-/* List of referral server sessions per dfs mount */
-struct mount_group {
- struct list_head list;
- uuid_t id;
- struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
- int num_sessions;
- spinlock_t lock;
- struct list_head refresh_list;
- struct kref refcount;
-};
-
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
-static LIST_HEAD(mount_group_list);
-static DEFINE_MUTEX(mount_group_list_lock);
-
static void refresh_cache_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-static void __mount_group_release(struct mount_group *mg)
-{
- int i;
-
- for (i = 0; i < mg->num_sessions; i++)
- cifs_put_smb_ses(mg->sessions[i]);
- kfree(mg);
-}
-
-static void mount_group_release(struct kref *kref)
-{
- struct mount_group *mg = container_of(kref, struct mount_group, refcount);
-
- mutex_lock(&mount_group_list_lock);
- list_del(&mg->list);
- mutex_unlock(&mount_group_list_lock);
- __mount_group_release(mg);
-}
-
-static struct mount_group *find_mount_group_locked(const uuid_t *id)
-{
- struct mount_group *mg;
-
- list_for_each_entry(mg, &mount_group_list, list) {
- if (uuid_equal(&mg->id, id))
- return mg;
- }
- return ERR_PTR(-ENOENT);
-}
-
-static struct mount_group *__get_mount_group_locked(const uuid_t *id)
-{
- struct mount_group *mg;
-
- mg = find_mount_group_locked(id);
- if (!IS_ERR(mg))
- return mg;
-
- mg = kmalloc(sizeof(*mg), GFP_KERNEL);
- if (!mg)
- return ERR_PTR(-ENOMEM);
- kref_init(&mg->refcount);
- uuid_copy(&mg->id, id);
- mg->num_sessions = 0;
- spin_lock_init(&mg->lock);
- list_add(&mg->list, &mount_group_list);
- return mg;
-}
-
-static struct mount_group *get_mount_group(const uuid_t *id)
-{
- struct mount_group *mg;
-
- mutex_lock(&mount_group_list_lock);
- mg = __get_mount_group_locked(id);
- if (!IS_ERR(mg))
- kref_get(&mg->refcount);
- mutex_unlock(&mount_group_list_lock);
-
- return mg;
-}
-
-static void free_mount_group_list(void)
-{
- struct mount_group *mg, *tmp_mg;
-
- list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
- list_del_init(&mg->list);
- __mount_group_release(mg);
- }
-}
-
/**
* dfs_cache_canonical_path - get a canonical DFS path
*
{
cancel_delayed_work_sync(&refresh_task);
unload_nls(cache_cp);
- free_mount_group_list();
flush_cache_ents();
kmem_cache_destroy(cache_slab);
destroy_workqueue(dfscache_wq);
return rc;
}
-/**
- * dfs_cache_add_refsrv_session - add SMB session of referral server
- *
- * @mount_id: mount group uuid to lookup.
- * @ses: reference counted SMB session of referral server.
- */
-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
-{
- struct mount_group *mg;
-
- if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
- return;
-
- mg = get_mount_group(mount_id);
- if (WARN_ON_ONCE(IS_ERR(mg)))
- return;
-
- spin_lock(&mg->lock);
- if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
- mg->sessions[mg->num_sessions++] = ses;
- spin_unlock(&mg->lock);
- kref_put(&mg->refcount, mount_group_release);
-}
-
-/**
- * dfs_cache_put_refsrv_sessions - put all referral server sessions
- *
- * Put all SMB sessions from the given mount group id.
- *
- * @mount_id: mount group uuid to lookup.
- */
-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
-{
- struct mount_group *mg;
-
- if (!mount_id || uuid_is_null(mount_id))
- return;
-
- mutex_lock(&mount_group_list_lock);
- mg = find_mount_group_locked(mount_id);
- if (IS_ERR(mg)) {
- mutex_unlock(&mount_group_list_lock);
- return;
- }
- mutex_unlock(&mount_group_list_lock);
- kref_put(&mg->refcount, mount_group_release);
-}
-
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
}
spin_lock(&ipc->tc_lock);
- if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
+ if (ipc->status != TID_GOOD) {
spin_unlock(&ipc->tc_lock);
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
goto out;
cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
return 0;
}
-
- if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
- cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
- return -EINVAL;
- }
/*
* After reconnecting to a different server, unique ids won't match anymore, so we disable
* serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
struct dfs_info3_param *ref);
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
char **prefix);
-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
struct list_head *tmp1;
/* only send once per connect */
- spin_lock(&tcon->ses->ses_lock);
- if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
- spin_unlock(&tcon->ses->ses_lock);
+ spin_lock(&tcon->tc_lock);
+ if (tcon->status != TID_NEED_RECON) {
+ spin_unlock(&tcon->tc_lock);
return;
}
tcon->status = TID_IN_FILES_INVALIDATE;
- spin_unlock(&tcon->ses->ses_lock);
+ spin_unlock(&tcon->tc_lock);
/* list all files open on tree connection and mark them invalid */
spin_lock(&tcon->open_file_lock);
bool rootfs:1; /* if it's a SMB root file system */
bool witness:1; /* use witness protocol */
char *leaf_fullpath;
+ struct cifs_ses *dfs_root_ses;
};
extern const struct fs_parameter_spec smb3_fs_parameters[];
* max deferred close timeout (jiffies) - 2^30
*/
#define SMB3_MAX_DCLOSETIMEO (1 << 30)
-#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */
+#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
#endif
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
+ .path = path,
.desired_access = GENERIC_READ,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
.disposition = FILE_OPEN,
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
+ .path = path,
.desired_access = GENERIC_WRITE,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
.disposition = FILE_CREATE,
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
+#include "dfs.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"
spin_lock_init(&ret_buf->stat_lock);
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
return ret_buf;
}
atomic_dec(&tconInfoAllocCount);
kfree(tcon->nativeFileSystem);
kfree_sensitive(tcon->password);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+#endif
kfree(tcon);
}
* removing cached DFS targets that the client would eventually
* need during failover.
*/
+ ses = CIFS_DFS_ROOT_SES(ses);
if (ses->server->ops->get_dfs_refer &&
!ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
&num_refs, cifs_sb->local_nls,
vars->oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = full_path,
.desired_access = desired_access,
.disposition = create_disposition,
.create_options = cifs_create_options(cifs_sb, create_options),
size[0] = 8; /* sizeof __le64 */
data[0] = ptr;
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_END_OF_FILE_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
+ if (cfile) {
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
+ cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ data, size);
+ } else {
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
+ COMPOUND_FID,
+ COMPOUND_FID,
+ current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ data, size);
+ if (!rc) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
+ }
+ }
if (rc)
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
+ num_rqst++;
trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_INFO:
p = buf;
spin_lock(&ses->iface_lock);
+ /* do not query too frequently, this time with lock held */
+ if (ses->iface_last_update &&
+ time_before(jiffies, ses->iface_last_update +
+ (SMB_INTERFACE_POLL_INTERVAL * HZ))) {
+ spin_unlock(&ses->iface_lock);
+ return 0;
+ }
+
/*
* Go through iface_list and do kref_put to remove
* any unused ifaces. ifaces in use will be removed
struct network_interface_info_ioctl_rsp *out_buf = NULL;
struct cifs_ses *ses = tcon->ses;
+ /* do not query too frequently */
+ if (ses->iface_last_update &&
+ time_before(jiffies, ses->iface_last_update +
+ (SMB_INTERFACE_POLL_INTERVAL * HZ)))
+ return 0;
+
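A hedged userspace sketch of the rate limit above: skip the query if less than a poll interval has elapsed since the last update (plain time() stands in for jiffies and does not handle wraparound the way time_before() does):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool should_query(time_t *last_update, unsigned interval_s)
{
	time_t now = time(NULL);

	if (*last_update && now < *last_update + (time_t)interval_s)
		return false;	/* too soon, skip this query */
	*last_update = now;
	return true;
}

int main(void)
{
	time_t last = 0;

	/* First call queries; an immediate second call is suppressed. */
	printf("%d %d\n", should_query(&last, 600), should_query(&last, 600));
	return 0;
}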
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO,
NULL /* no data input */, 0 /* no data input */,
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI,
"server does not support query network interfaces\n");
- goto out;
+ ret_data_len = 0;
} else if (rc != 0) {
cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = "",
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = "",
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = full_path,
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.desired_access = FILE_WRITE_EA,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
tcon = cifs_sb_master_tcon(cifs_sb);
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.desired_access = desired_access,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = "",
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = full_path,
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, create_options),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = full_path,
.desired_access = FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
oparms = (struct cifs_open_parms) {
.tcon = tcon,
+ .path = path,
.desired_access = READ_CONTROL,
.disposition = FILE_OPEN,
/*
struct TCP_Server_Info *server)
{
int rc = 0;
- struct nls_table *nls_codepage;
+ struct nls_table *nls_codepage = NULL;
struct cifs_ses *ses;
/*
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
/*
- * only tree disconnect, open, and write,
- * (and ulogoff which does not have tcon)
- * are allowed as we start force umount.
+ * only tree disconnect allowed when disconnecting ...
*/
- if ((smb2_command != SMB2_WRITE) &&
- (smb2_command != SMB2_CREATE) &&
- (smb2_command != SMB2_TREE_DISCONNECT)) {
+ if (smb2_command != SMB2_TREE_DISCONNECT) {
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
}
spin_unlock(&server->srv_lock);
+again:
rc = cifs_wait_for_server_reconnect(server, tcon->retry);
if (rc)
return rc;
tcon->ses->chans_need_reconnect,
tcon->need_reconnect);
- nls_codepage = load_nls_default();
-
+ mutex_lock(&ses->session_mutex);
/*
* Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
+ mutex_unlock(&ses->session_mutex);
+
+ if (tcon->retry)
+ goto again;
+
rc = -EHOSTDOWN;
goto out;
}
spin_unlock(&server->srv_lock);
+ nls_codepage = load_nls_default();
+
/*
* need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/
+ spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
- if (!cifs_chan_needs_reconnect(ses, server)) {
+ if (!cifs_chan_needs_reconnect(ses, server) &&
+ ses->ses_status == SES_GOOD) {
spin_unlock(&ses->chan_lock);
-
+ spin_unlock(&ses->ses_lock);
/* this means that we only need to tree connect */
if (tcon->need_reconnect)
goto skip_sess_setup;
+ mutex_unlock(&ses->session_mutex);
goto out;
}
spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
- mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(0, ses, server);
if (!rc) {
rc = cifs_setup_session(0, ses, server, nls_codepage);
mutex_unlock(&ses->session_mutex);
goto out;
}
- mutex_unlock(&ses->session_mutex);
skip_sess_setup:
- mutex_lock(&ses->session_mutex);
if (!tcon->need_reconnect) {
mutex_unlock(&ses->session_mutex);
goto out;
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */
- pr_warn_once("reconnect tcon failed rc = %d\n", rc);
+ cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
goto out;
}
if (rc)
return rc;
- spin_lock(&ses->chan_lock);
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
- spin_unlock(&ses->chan_lock);
+ spin_lock(&ses->ses_lock);
+ is_binding = (ses->ses_status == SES_GOOD);
+ spin_unlock(&ses->ses_lock);
if (is_binding) {
req->hdr.SessionId = cpu_to_le64(ses->Suid);
goto out_put_spnego_key;
}
- spin_lock(&ses->chan_lock);
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
- spin_unlock(&ses->chan_lock);
+ spin_lock(&ses->ses_lock);
+ is_binding = (ses->ses_status == SES_GOOD);
+ spin_unlock(&ses->ses_lock);
/* keep session key if binding */
if (!is_binding) {
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
- spin_lock(&ses->chan_lock);
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
- spin_unlock(&ses->chan_lock);
+ spin_lock(&ses->ses_lock);
+ is_binding = (ses->ses_status == SES_GOOD);
+ spin_unlock(&ses->ses_lock);
/* keep existing ses id and flags if binding */
if (!is_binding) {
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
- spin_lock(&ses->chan_lock);
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
- spin_unlock(&ses->chan_lock);
+ spin_lock(&ses->ses_lock);
+ is_binding = (ses->ses_status == SES_GOOD);
+ spin_unlock(&ses->ses_lock);
/* keep existing ses id and flags if binding */
if (!is_binding) {
rqst.rq_nvec = n_iov;
/* no need to inc num_remote_opens because we close it just below */
- trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
+ trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
rc = cifs_send_recv(xid, ses, server,
if (rc)
goto creat_exit;
- trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
+ trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
oparms->create_options, oparms->desired_access);
rc = cifs_send_recv(xid, ses, server,
struct cifs_ses *ses = NULL;
int i;
int rc = 0;
+ bool is_binding = false;
spin_lock(&cifs_tcp_ses_lock);
goto out;
found:
+ spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
- if (cifs_chan_needs_reconnect(ses, server) &&
- !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+
+ is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+ ses->ses_status == SES_GOOD);
+ if (is_binding) {
/*
* If we are in the process of binding a new channel
* to an existing session, use the master connection
*/
memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
goto out;
}
if (chan->server == server) {
memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
goto out;
}
}
spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
cifs_dbg(VFS,
"%s: Could not find channel signing key for session 0x%llx\n",
bool is_binding = false;
int chan_index = 0;
+ spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+ ses->ses_status == SES_GOOD);
+
chan_index = cifs_ses_get_chan_index(ses, server);
/* TODO: introduce ref counting for channels when the can be freed */
spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
/*
* All channels use the same encryption/decryption keys but
/* safe to access primary channel, since it will never go away */
spin_lock(&ses->chan_lock);
- memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+ memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
TP_PROTO(unsigned int xid,
__u32 tid,
__u64 sesid,
+ const char *full_path,
int create_options,
int desired_access),
- TP_ARGS(xid, tid, sesid, create_options, desired_access),
+ TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
TP_STRUCT__entry(
__field(unsigned int, xid)
__field(__u32, tid)
__field(__u64, sesid)
+ __string(path, full_path)
__field(int, create_options)
__field(int, desired_access)
),
__entry->xid = xid;
__entry->tid = tid;
__entry->sesid = sesid;
+ __assign_str(path, full_path);
__entry->create_options = create_options;
__entry->desired_access = desired_access;
),
- TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
- __entry->xid, __entry->sesid, __entry->tid,
+ TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
+ __entry->xid, __entry->sesid, __entry->tid, __get_str(path),
__entry->create_options, __entry->desired_access)
)
TP_PROTO(unsigned int xid, \
__u32 tid, \
__u64 sesid, \
+ const char *full_path, \
int create_options, \
int desired_access), \
- TP_ARGS(xid, tid, sesid, create_options, desired_access))
+ TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *rqst)
{
- int rc = 0;
+ int rc;
struct kvec *iov;
int n_vec;
unsigned int send_length = 0;
struct msghdr smb_msg = {};
__be32 rfc1002_marker;
+ cifs_in_send_inc(server);
if (cifs_rdma_enabled(server)) {
/* return -EAGAIN when connecting or reconnecting */
rc = -EAGAIN;
goto smbd_done;
}
+ rc = -EAGAIN;
if (ssocket == NULL)
- return -EAGAIN;
+ goto out;
+ rc = -ERESTARTSYS;
if (fatal_signal_pending(current)) {
cifs_dbg(FYI, "signal pending before send request\n");
- return -ERESTARTSYS;
+ goto out;
}
+ rc = 0;
/* cork the socket */
tcp_sock_set_cork(ssocket->sk, true);
rc);
else if (rc > 0)
rc = 0;
-
+out:
+ cifs_in_send_dec(server);
return rc;
}
* I/O response may come back and free the mid entry on another thread.
*/
cifs_save_when_sent(mid);
- cifs_in_send_inc(server);
rc = smb_send_rqst(server, 1, rqst, flags);
- cifs_in_send_dec(server);
if (rc < 0) {
revert_current_mid(server, mid->credits);
else
midQ[i]->callback = cifs_compound_last_callback;
}
- cifs_in_send_inc(server);
rc = smb_send_rqst(server, num_rqst, rqst, flags);
- cifs_in_send_dec(server);
for (i = 0; i < num_rqst; i++)
cifs_save_when_sent(midQ[i]);
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len);
- cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len);
- cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
* destroying any subkeys embedded in it.
*/
+ if (WARN_ON(!sb->s_master_keys))
+ return;
spin_lock(&sb->s_master_keys->lock);
hlist_del_rcu(&mk->mk_node);
spin_unlock(&sb->s_master_keys->lock);
* Release all encryption keys that have been added to the filesystem, along
* with the keyring that contains them.
*
- * This is called at unmount time. The filesystem's underlying block device(s)
- * are still available at this time; this is important because after user file
- * accesses have been allowed, this function may need to evict keys from the
- * keyslots of an inline crypto engine, which requires the block device(s).
+ * This is called at unmount time, after all potentially-encrypted inodes have
+ * been evicted. The filesystem's underlying block device(s) are still
+ * available at this time; this is important because after user file accesses
+ * have been allowed, this function may need to evict keys from the keyslots of
+ * an inline crypto engine, which requires the block device(s).
*/
void fscrypt_destroy_keyring(struct super_block *sb)
{
hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
/*
- * Since all inodes were already evicted, every key
- * remaining in the keyring should have an empty inode
- * list, and should only still be in the keyring due to
- * the single active ref associated with ->mk_secret.
- * There should be no structural refs beyond the one
- * associated with the active ref.
+ * Since all potentially-encrypted inodes were already
+ * evicted, every key remaining in the keyring should
+ * have an empty inode list, and should only still be in
+ * the keyring due to the single active ref associated
+ * with ->mk_secret. There should be no structural refs
+ * beyond the one associated with the active ref.
*/
WARN_ON(refcount_read(&mk->mk_active_refs) != 1);
WARN_ON(refcount_read(&mk->mk_struct_refs) != 1);
goto end_rename;
}
retval = ext4_rename_dir_prepare(handle, &old);
- if (retval) {
- inode_unlock(old.inode);
+ if (retval)
goto end_rename;
- }
}
/*
* If we're renaming a file within an inline_data dir and adding or
return 0;
}
+static int gfs2_dentry_delete(const struct dentry *dentry)
+{
+ struct gfs2_inode *ginode;
+
+ if (d_really_is_negative(dentry))
+ return 0;
+
+ ginode = GFS2_I(d_inode(dentry));
+ if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
+ return 0;
+
+ if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
+ return 1;
+
+ return 0;
+}
+
const struct dentry_operations gfs2_dops = {
.d_revalidate = gfs2_drevalidate,
.d_hash = gfs2_dhash,
+ .d_delete = gfs2_dentry_delete,
};
goto smb3signkey_ret;
}
- if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
- conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+ if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
+ (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+ conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
else
rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
kvfree(conn->request_buf);
conn->request_buf = NULL;
- size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
+ size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
if (size != sizeof(hdr_buf))
break;
}
/*
- * Check if pdu size is valid (min : smb header size,
- * max : 0x00FFFFFF).
+ * Check maximum pdu size (0x00FFFFFF).
*/
- if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
- pdu_size > MAX_STREAM_PROT_LEN) {
+ if (pdu_size > MAX_STREAM_PROT_LEN)
break;
- }
/* 4 for rfc1002 length field */
size = pdu_size + 4;
* We already read 4 bytes to find out PDU size, now
* read in PDU
*/
- size = t->ops->read(t, conn->request_buf + 4, pdu_size);
+ size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
if (size < 0) {
pr_err("sock_read failed: %d\n", size);
break;
int (*prepare)(struct ksmbd_transport *t);
void (*disconnect)(struct ksmbd_transport *t);
void (*shutdown)(struct ksmbd_transport *t);
- int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
+ int (*read)(struct ksmbd_transport *t, char *buf,
+ unsigned int size, int max_retries);
int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
int size, bool need_invalidate_rkey,
unsigned int remote_key);
sizeof(struct smb_acl) +
sizeof(struct smb_ace) * ace_num * 2,
GFP_KERNEL);
- if (!pntsd)
+ if (!pntsd) {
+ posix_acl_release(fattr.cf_acls);
+ posix_acl_release(fattr.cf_dacls);
goto err_out;
+ }
rc = build_sec_desc(idmap,
pntsd, NULL, 0,
info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
+ if (test_share_config_flag(work->tcon->share_conf,
+ KSMBD_SHARE_FLAG_STREAMS))
+ info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
+
info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
len = smbConvertToUTF16((__le16 *)info->FileSystemName,
"NTFS", PATH_MAX, conn->local_nls, 0);
if (in_count == 0)
return -EINVAL;
+ start = le64_to_cpu(qar_req->file_offset);
+ length = le64_to_cpu(qar_req->length);
+
+ if (start < 0 || length < 0)
+ return -EINVAL;
+
fp = ksmbd_lookup_fd_fast(work, id);
if (!fp)
return -ENOENT;
- start = le64_to_cpu(qar_req->file_offset);
- length = le64_to_cpu(qar_req->length);
-
ret = ksmbd_vfs_fqar_lseek(fp, start, length,
qar_rsp, in_count, out_count);
if (ret && ret != -E2BIG)
off = le64_to_cpu(zero_data->FileOffset);
bfz = le64_to_cpu(zero_data->BeyondFinalZero);
- if (off > bfz) {
+ if (off < 0 || bfz < 0 || off > bfz) {
ret = -EINVAL;
goto out;
}
static int __smb2_negotiate(struct ksmbd_conn *conn)
{
- return (conn->dialect >= SMB21_PROT_ID &&
+ return (conn->dialect >= SMB20_PROT_ID &&
conn->dialect <= SMB311_PROT_ID);
}
{
struct smb_negotiate_rsp *neg_rsp = work->response_buf;
- ksmbd_debug(SMB, "Unsupported SMB protocol\n");
- neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
- return -EINVAL;
+ ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+
+ /*
+ * Remove 4 byte direct TCP header, add 2 byte bcc and
+ * 2 byte DialectIndex.
+ */
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+ neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+ neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
+ *(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
+ neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
+ neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+ SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+
+ neg_rsp->hdr.WordCount = 1;
+ neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+ neg_rsp->ByteCount = 0;
+ return 0;
}
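A quick check of the length math in the comment above; the 33-byte header size is an assumption standing in for sizeof(struct smb_hdr):

#include <stdio.h>

int main(void)
{
	const unsigned smb_hdr_size = 33;	/* assumed sizeof(struct smb_hdr) */
	unsigned rfc1002_len;

	/* Drop the 4-byte direct TCP header, add DialectIndex and ByteCount. */
	rfc1002_len = smb_hdr_size - 4 + 2 + 2;

	printf("rfc1002 length = %u\n", rfc1002_len);
	return 0;
}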
int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
}
}
- if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
+ if (command == SMB2_NEGOTIATE_HE) {
ret = smb2_handle_negotiate(work);
init_smb2_neg_rsp(work);
return ret;
#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
#define SMB_COM_NEGOTIATE 0x72
-
#define SMB1_CLIENT_GUID_SIZE (16)
+
+#define SMBFLG_RESPONSE 0x80 /* this PDU is a response from server */
+
+#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+
struct smb_hdr {
__be32 smb_buf_length;
__u8 Protocol[4];
struct smb_negotiate_rsp {
struct smb_hdr hdr; /* wct = 17 */
__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
- __u8 SecurityMode;
- __le16 MaxMpxCount;
- __le16 MaxNumberVcs;
- __le32 MaxBufferSize;
- __le32 MaxRawSize;
- __le32 SessionKey;
- __le32 Capabilities; /* see below */
- __le32 SystemTimeLow;
- __le32 SystemTimeHigh;
- __le16 ServerTimeZone;
- __u8 EncryptionKeyLength;
__le16 ByteCount;
- union {
- unsigned char EncryptionKey[8]; /* cap extended security off */
- /* followed by Domain name - if extended security is off */
- /* followed by 16 bytes of server GUID */
- /* then security blob if cap_extended_security negotiated */
- struct {
- unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
- unsigned char SecurityBlob[1];
- } __packed extended_response;
- } __packed u;
} __packed;
struct filesystem_attribute_info {
}
static int smb_direct_read(struct ksmbd_transport *t, char *buf,
- unsigned int size)
+ unsigned int size, int unused)
{
struct smb_direct_recvmsg *recvmsg;
struct smb_direct_data_transfer *data_transfer;
/**
* ksmbd_tcp_readv() - read data from socket in given iovec
- * @t: TCP transport instance
- * @iov_orig: base IO vector
- * @nr_segs: number of segments in base iov
- * @to_read: number of bytes to read from socket
+ * @t: TCP transport instance
+ * @iov_orig: base IO vector
+ * @nr_segs: number of segments in base iov
+ * @to_read: number of bytes to read from socket
+ * @max_retries: maximum retry count
*
* Return: on success return number of bytes read from socket,
* otherwise return error number
*/
static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
- unsigned int nr_segs, unsigned int to_read)
+ unsigned int nr_segs, unsigned int to_read,
+ int max_retries)
{
int length = 0;
int total_read;
struct msghdr ksmbd_msg;
struct kvec *iov;
struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
- int max_retry = 2;
iov = get_conn_iovec(t, nr_segs);
if (!iov)
} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
total_read = -EAGAIN;
break;
- } else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
- max_retry) {
+ } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+ /*
+ * If max_retries is negative, allow unlimited
+ * retries to keep the connection alive for inactive sessions.
+ */
+ if (max_retries == 0) {
+ total_read = length;
+ break;
+ } else if (max_retries > 0) {
+ max_retries--;
+ }
+
usleep_range(1000, 2000);
length = 0;
- max_retry--;
continue;
} else if (length <= 0) {
- total_read = -EAGAIN;
+ total_read = length;
break;
}
}
* Return: on success return number of bytes read from socket,
* otherwise return error number
*/
-static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read)
+static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
+ unsigned int to_read, int max_retries)
{
struct kvec iov;
iov.iov_base = buf;
iov.iov_len = to_read;
- return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read);
+ return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
}
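A hedged sketch of the retry policy threaded through read() above: negative max_retries retries transient errors forever, zero fails immediately, positive counts down (try_read_once() is a stand-in that always fails):

#include <stdio.h>

static int try_read_once(void)
{
	return -1;	/* pretend the socket keeps returning -EAGAIN */
}

static int read_with_retries(int max_retries)
{
	for (;;) {
		int n = try_read_once();

		if (n >= 0)
			return n;
		if (max_retries == 0)
			return n;	/* out of retries: give up */
		if (max_retries > 0)
			max_retries--;	/* negative: retry forever */
	}
}

int main(void)
{
	printf("%d\n", read_with_retries(2));	/* gives up after 3 attempts */
	return 0;
}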
static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
u32 exclusive;
int error;
__be32 *p;
- s32 end;
memset(lock, 0, sizeof(*lock));
locks_init_lock(fl);
fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
p = xdr_decode_hyper(p, &l_offset);
xdr_decode_hyper(p, &l_len);
- end = l_offset + l_len - 1;
-
- fl->fl_start = (loff_t)l_offset;
- if (l_len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = (loff_t)end;
+ nlm4svc_set_file_lock_range(fl, l_offset, l_len);
error = 0;
out:
return error;
return res;
}
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
+{
+ s64 end = off + len - 1;
+
+ fl->fl_start = off;
+ if (len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = end;
+}
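A userspace analogue of the range helper above: zero length, or an end that wraps past the signed 64-bit maximum, means "lock to end of file" (the u64-to-s64 cast relies on two's complement, as the kernel does):

#include <inttypes.h>
#include <stdio.h>

#define OFFSET_MAX INT64_MAX

static void set_range(int64_t *start, int64_t *end, uint64_t off, uint64_t len)
{
	int64_t e = (int64_t)(off + len - 1);

	*start = (int64_t)off;
	if (len == 0 || e < 0)
		*end = OFFSET_MAX;	/* whole-file lock */
	else
		*end = e;
}

int main(void)
{
	int64_t s, e;

	set_range(&s, &e, 100, 0);
	printf("%" PRId64 "..%" PRId64 "\n", s, e);	/* 100..OFFSET_MAX */
	return 0;
}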
+
/*
* NLM file handles are defined by specification to be a variable-length
* XDR opaque no longer than 1024 bytes. However, this implementation
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK;
-
+ nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
return true;
}
config NFS_V4
tristate "NFS client support for NFS version 4"
depends on NFS_FS
- select SUNRPC_GSS
+ select RPCSEC_GSS_KRB5
select KEYS
help
This option enables support for version 4 of the NFS protocol
else
goto found;
}
- set->timestamp = ktime_get_ns();
rb_link_node(&set->rb_node, parent, p);
rb_insert_color(&set->rb_node, root_node);
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
cache->fsgid = cred->fsgid;
cache->group_info = get_group_info(cred->group_info);
cache->mask = set->mask;
+ cache->timestamp = ktime_get_ns();
/* The above field assignments must be visible
* before this item appears on the lru. We cannot easily
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
trace_nfs_aop_readpage(inode, folio);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+ task_io_account_read(folio_size(folio));
/*
* Try to flush any pending writes to the file..
trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+ task_io_account_read(readahead_length(ractl));
ret = -ESTALE;
if (NFS_STALE(inode))
bool "NFS server support for NFS version 4"
depends on NFSD && PROC_FS
select FS_POSIX_ACL
- select SUNRPC_GSS
+ select RPCSEC_GSS_KRB5
select CRYPTO
select CRYPTO_MD5
select CRYPTO_SHA256
struct page *last_page;
last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
- for (page += offset / PAGE_SIZE; page <= last_page; page++)
+ for (page += offset / PAGE_SIZE; page <= last_page; page++) {
+ /*
+ * Skip page replacement when extending the contents
+ * of the current page.
+ */
+ if (page == *(rqstp->rq_next_page - 1))
+ continue;
svc_rqst_replace_page(rqstp, page);
+ }
if (rqstp->rq_res.page_len == 0) // first call
rqstp->rq_res.page_base = offset % PAGE_SIZE;
rqstp->rq_res.page_len += sd->len;
if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
return -EINVAL;
- buf = (void *)__get_free_pages(GFP_NOFS, 0);
+ buf = (void *)get_zeroed_page(GFP_NOFS);
if (unlikely(!buf))
return -ENOMEM;
maxmembs = PAGE_SIZE / argv->v_size;
}
if (unlikely(copied < len) && wc->w_target_page) {
+ loff_t new_isize;
+
if (!PageUptodate(wc->w_target_page))
copied = 0;
- ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
- start+len);
+ new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
+ if (new_isize > page_offset(wc->w_target_page))
+ ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+ start+len);
+ else {
+ /*
+ * When page is fully beyond new isize (data copy
+ * failed), do not bother zeroing the page. Invalidate
+			 * it instead so that writeback does not get
+			 * confused and put the page & buffer dirty bits
+			 * into an inconsistent state.
+ */
+ block_invalidate_folio(page_folio(wc->w_target_page),
+ 0, PAGE_SIZE);
+ }
}
if (wc->w_target_page)
flush_dcache_page(wc->w_target_page);
cgroup_writeback_umount();
- /* evict all inodes with zero refcount */
+ /* Evict all inodes with zero refcount. */
evict_inodes(sb);
- /* only nonzero refcount inodes can have marks */
+
+ /*
+ * Clean up and evict any inodes that still have references due
+ * to fsnotify or the security policy.
+ */
fsnotify_sb_delete(sb);
- fscrypt_destroy_keyring(sb);
security_sb_delete(sb);
+ /*
+ * Now that all potentially-encrypted inodes have been evicted,
+ * the fscrypt keyring can be destroyed.
+ */
+ fscrypt_destroy_keyring(sb);
+
if (sb->s_dio_done_wq) {
destroy_workqueue(sb->s_dio_done_wq);
sb->s_dio_done_wq = NULL;
#include "fsverity_private.h"
#include <linux/mount.h>
-#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
goto out_drop_write;
err = enable_verity(filp, &arg);
- if (err)
- goto out_allow_write_access;
/*
- * Some pages of the file may have been evicted from pagecache after
- * being used in the Merkle tree construction, then read into pagecache
- * again by another process reading from the file concurrently. Since
- * these pages didn't undergo verification against the file digest which
- * fs-verity now claims to be enforcing, we have to wipe the pagecache
- * to ensure that all future reads are verified.
+ * We no longer drop the inode's pagecache after enabling verity. This
+ * used to be done to try to avoid a race condition where pages could be
+ * evicted after being used in the Merkle tree construction, then
+ * re-instantiated by a concurrent read. Such pages are unverified, and
+ * the backing storage could have filled them with different content, so
+ * they shouldn't be used to fulfill reads once verity is enabled.
+ *
+ * But, dropping the pagecache has a big performance impact, and it
+ * doesn't fully solve the race condition anyway. So for those reasons,
+ * and also because this race condition isn't very important relatively
+ * speaking (especially for small-ish files, where the chance of a page
+ * being used, evicted, *and* re-instantiated all while enabling verity
+ * is quite small), we no longer drop the inode's pagecache.
*/
- filemap_write_and_wait(inode->i_mapping);
- invalidate_inode_pages2(inode->i_mapping);
/*
* allow_write_access() is needed to pair with deny_write_access().
* Regardless, the filesystem won't allow writing to verity files.
*/
-out_allow_write_access:
allow_write_access(filp);
out_drop_write:
mnt_drop_write_file(filp);
int __init fsverity_init_workqueue(void)
{
/*
- * Use an unbound workqueue to allow bios to be verified in parallel
- * even when they happen to complete on the same CPU. This sacrifices
- * locality, but it's worthwhile since hashing is CPU-intensive.
+ * Use a high-priority workqueue to prioritize verification work, which
+ * blocks reads from completing, over regular application tasks.
*
- * Also use a high-priority workqueue to prioritize verification work,
- * which blocks reads from completing, over regular application tasks.
+ * For performance reasons, don't use an unbound workqueue. Using an
+ * unbound workqueue for crypto operations causes excessive scheduler
+ * latency on ARM64.
*/
fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_HIGHPRI,
num_online_cpus());
if (!fsverity_read_workqueue)
return -ENOMEM;
xfs_bmap_util.o \
xfs_bio_io.o \
xfs_buf.o \
+ xfs_dahash_test.o \
xfs_dir2_readdir.o \
xfs_discard.o \
xfs_error.o \
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
if (xfs_agfl_needs_reset(pag->pag_mount, agf))
set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
+ else
+ clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
/*
* Update the in-core allocbt counter. Filter out the rmapbt
XFS_STATS_INC(mp, xs_allocx);
XFS_STATS_ADD(mp, xs_allocb, args->len);
+ trace_xfs_alloc_vextent_finish(args);
+
out_drop_perag:
if (drop_perag && args->pag) {
xfs_perag_rele(args->pag);
xfs_agnumber_t minimum_agno;
int error;
+ ASSERT(args->pag != NULL);
+ ASSERT(args->pag->pag_agno == agno);
+
args->agno = agno;
args->agbno = 0;
+
+ trace_xfs_alloc_vextent_this_ag(args);
+
error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
&minimum_agno);
if (error) {
uint32_t flags)
{
struct xfs_mount *mp = args->mp;
+ xfs_agnumber_t restart_agno = minimum_agno;
xfs_agnumber_t agno;
int error = 0;
+ if (flags & XFS_ALLOC_FLAG_TRYLOCK)
+ restart_agno = 0;
restart:
- for_each_perag_wrap_range(mp, start_agno, minimum_agno,
+ for_each_perag_wrap_range(mp, start_agno, restart_agno,
mp->m_sb.sb_agcount, agno, args->pag) {
args->agno = agno;
error = xfs_alloc_vextent_prepare_ag(args);
*/
if (flags) {
flags = 0;
+ restart_agno = minimum_agno;
goto restart;
}
bool bump_rotor = false;
int error;
+ ASSERT(args->pag == NULL);
+
args->agno = NULLAGNUMBER;
args->agbno = NULLAGBLOCK;
+
+ trace_xfs_alloc_vextent_start_ag(args);
+
error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
if (error) {
if (error == -ENOSPC)
xfs_agnumber_t start_agno;
int error;
+ ASSERT(args->pag == NULL);
+
args->agno = NULLAGNUMBER;
args->agbno = NULLAGBLOCK;
+
+ trace_xfs_alloc_vextent_first_ag(args);
+
error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
if (error) {
if (error == -ENOSPC)
xfs_agnumber_t minimum_agno;
int error;
+ ASSERT(args->pag != NULL);
+ ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
+
args->agno = XFS_FSB_TO_AGNO(mp, target);
args->agbno = XFS_FSB_TO_AGBNO(mp, target);
+
+ trace_xfs_alloc_vextent_exact_bno(args);
+
error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
if (error) {
if (error == -ENOSPC)
bool needs_perag = args->pag == NULL;
int error;
+ if (!needs_perag)
+ ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
+
args->agno = XFS_FSB_TO_AGNO(mp, target);
args->agbno = XFS_FSB_TO_AGBNO(mp, target);
+
+ trace_xfs_alloc_vextent_near_bno(args);
+
error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
if (error) {
if (error == -ENOSPC)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dahash_test.h"
+
+/* 4096 random bytes */
+static uint8_t __initdata __attribute__((__aligned__(8))) test_buf[] =
+{
+ 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
+ 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
+ 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
+ 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
+ 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
+ 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
+ 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
+ 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
+ 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
+ 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
+ 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
+ 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
+ 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
+ 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
+ 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
+ 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
+ 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
+ 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
+ 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
+ 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
+ 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
+ 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
+ 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
+ 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
+ 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
+ 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
+ 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
+ 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
+ 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
+ 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
+ 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
+ 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
+ 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
+ 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
+ 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
+ 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
+ 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
+ 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
+ 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
+ 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
+ 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
+ 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
+ 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
+ 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
+ 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
+ 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
+ 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
+ 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
+ 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
+ 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
+ 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
+ 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
+ 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
+ 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
+ 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
+ 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
+ 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
+ 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
+ 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
+ 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
+ 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
+ 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
+ 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
+ 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
+ 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
+ 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
+ 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
+ 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
+ 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
+ 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
+ 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
+ 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
+ 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
+ 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
+ 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
+ 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
+ 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
+ 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
+ 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
+ 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
+ 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
+ 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
+ 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
+ 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
+ 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
+ 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
+ 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
+ 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
+ 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
+ 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
+ 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
+ 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
+ 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
+ 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
+ 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
+ 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
+ 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
+ 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
+ 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
+ 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
+ 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
+ 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
+ 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
+ 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
+ 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
+ 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
+ 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
+ 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
+ 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
+ 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
+ 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
+ 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
+ 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
+ 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
+ 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
+ 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
+ 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
+ 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
+ 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
+ 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
+ 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
+ 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
+ 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
+ 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
+ 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
+ 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
+ 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
+ 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
+ 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
+ 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
+ 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
+ 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
+ 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
+ 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
+ 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
+ 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
+ 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
+ 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
+ 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
+ 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
+ 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
+ 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
+ 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
+ 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
+ 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
+ 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
+ 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
+ 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
+ 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
+ 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
+ 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
+ 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
+ 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
+ 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
+ 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
+ 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
+ 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
+ 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
+ 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
+ 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
+ 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
+ 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
+ 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
+ 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
+ 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
+ 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
+ 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
+ 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
+ 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
+ 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
+ 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
+ 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
+ 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
+ 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
+ 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
+ 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
+ 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
+ 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
+ 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
+ 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
+ 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
+ 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
+ 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
+ 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
+ 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
+ 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
+ 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
+ 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
+ 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
+ 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
+ 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
+ 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
+ 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
+ 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
+ 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
+ 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
+ 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
+ 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
+ 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
+ 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
+ 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
+ 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
+ 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
+ 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
+ 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
+ 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
+ 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
+ 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
+ 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
+ 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
+ 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
+ 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
+ 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
+ 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
+ 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
+ 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
+ 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
+ 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
+ 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
+ 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
+ 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
+ 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
+ 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
+ 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
+ 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
+ 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
+ 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
+ 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
+ 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
+ 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
+ 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
+ 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
+ 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
+ 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
+ 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
+ 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
+ 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
+ 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
+ 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
+ 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
+ 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
+ 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
+ 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
+ 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
+ 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
+ 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
+ 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
+ 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
+ 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
+ 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
+ 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
+ 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
+ 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
+ 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
+ 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
+ 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
+ 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
+ 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
+ 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
+ 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
+ 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
+ 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
+ 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
+ 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
+ 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
+ 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
+ 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
+ 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
+ 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
+ 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
+ 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
+ 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
+ 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
+ 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
+ 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
+ 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
+ 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
+ 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
+ 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
+ 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
+ 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
+ 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
+ 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
+ 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
+ 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
+ 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
+ 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
+ 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
+ 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
+ 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
+ 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
+ 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
+ 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
+ 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
+ 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
+ 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
+ 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
+ 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
+ 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
+ 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
+ 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
+ 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
+ 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
+ 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
+ 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
+ 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
+ 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
+ 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
+ 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
+ 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
+ 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
+ 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
+ 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
+ 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
+ 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
+ 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
+ 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
+ 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
+ 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
+ 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
+ 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
+ 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
+ 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
+ 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
+ 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
+ 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
+ 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
+ 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
+ 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
+ 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
+ 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
+ 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
+ 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
+ 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
+ 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
+ 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
+ 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
+ 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
+ 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
+ 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
+ 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
+ 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
+ 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
+ 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
+ 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
+ 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
+ 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
+ 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
+ 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
+ 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
+ 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
+ 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
+ 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
+ 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
+ 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
+ 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
+ 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
+ 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
+ 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
+ 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
+ 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
+ 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
+ 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
+ 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
+ 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
+ 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
+ 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
+ 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
+ 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
+ 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
+ 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
+ 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
+ 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
+ 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
+ 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
+ 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
+ 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
+ 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
+ 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
+ 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
+ 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
+ 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
+ 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
+ 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
+ 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
+ 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
+ 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
+ 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
+ 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
+ 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
+ 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
+ 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
+ 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
+ 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
+ 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
+ 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
+ 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
+ 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
+ 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
+ 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
+ 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
+ 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
+ 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
+ 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
+ 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
+ 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
+ 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
+ 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
+ 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
+ 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
+ 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
+ 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
+ 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
+ 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
+ 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
+ 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
+ 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
+ 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
+ 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
+ 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
+ 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
+ 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
+ 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
+ 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
+ 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
+ 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
+ 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
+ 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
+ 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
+ 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
+ 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
+ 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
+ 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
+ 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
+ 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
+ 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
+ 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
+ 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
+ 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
+ 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
+ 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
+ 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
+ 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
+ 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
+ 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
+ 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
+ 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
+ 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
+ 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
+ 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
+ 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
+ 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
+ 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
+ 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
+ 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
+ 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
+ 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
+ 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
+ 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
+ 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
+ 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
+ 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
+ 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
+ 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
+ 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
+ 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
+ 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
+ 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
+ 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
+ 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
+ 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
+ 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
+ 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
+ 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
+ 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
+ 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
+ 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
+ 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
+ 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
+ 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
+ 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
+ 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
+ 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
+ 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
+ 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
+ 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
+ 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
+ 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
+ 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
+ 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
+ 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
+ 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
+ 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
+ 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
+ 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
+ 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
+ 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
+ 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
+ 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
+ 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
+ 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
+ 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
+ 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
+ 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
+ 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
+ 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
+ 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
+ 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
+ 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
+ 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
+ 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
+ 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
+};
+
+/* 100 test cases */
+static struct dahash_test {
+	uint16_t	start;		/* random 12-bit offset in buf */
+	uint16_t	length;		/* random 8-bit length of test */
+ xfs_dahash_t dahash; /* expected dahash result */
+} test[] __initdata =
+{
+ {0x0567, 0x0097, 0x96951389},
+ {0x0869, 0x0055, 0x6455ab4f},
+ {0x0c51, 0x00be, 0x8663afde},
+ {0x044a, 0x00fc, 0x98fbe432},
+ {0x0f29, 0x0079, 0x42371997},
+ {0x08ba, 0x0052, 0x942be4f7},
+ {0x01f2, 0x0013, 0x5262687e},
+ {0x09e3, 0x00e2, 0x8ffb0908},
+ {0x007c, 0x0051, 0xb3158491},
+ {0x0854, 0x001f, 0x83bb20d9},
+ {0x031b, 0x0008, 0x98970bdf},
+ {0x0de7, 0x0027, 0xbfbf6f6c},
+ {0x0f76, 0x0005, 0x906a7105},
+ {0x092e, 0x00d0, 0x86631850},
+ {0x0233, 0x0082, 0xdbdd914e},
+ {0x04c9, 0x0075, 0x5a400a9e},
+ {0x0b66, 0x0099, 0xae128b45},
+ {0x000d, 0x00ed, 0xe61c216a},
+ {0x0a31, 0x003d, 0xf69663b9},
+ {0x00a3, 0x0052, 0x643c39ae},
+ {0x0125, 0x00d5, 0x7c310b0d},
+ {0x0105, 0x004a, 0x06a77e74},
+ {0x0858, 0x008e, 0x265bc739},
+ {0x045e, 0x0095, 0x13d6b192},
+ {0x0dab, 0x003c, 0xc4498704},
+ {0x00cd, 0x00b5, 0x802a4e2d},
+ {0x069b, 0x008c, 0x5df60f71},
+ {0x0454, 0x006c, 0x5f03d8bb},
+ {0x040e, 0x0032, 0x0ce513b5},
+ {0x0874, 0x00e2, 0x6a811fb3},
+ {0x0521, 0x00b4, 0x93296833},
+ {0x0ddc, 0x00cf, 0xf9305338},
+ {0x0a70, 0x0023, 0x239549ea},
+ {0x083e, 0x0027, 0x2d88ba97},
+ {0x0241, 0x00a7, 0xfe0b32e1},
+ {0x0dfc, 0x0096, 0x1a11e815},
+ {0x023e, 0x001e, 0xebc9a1f3},
+ {0x067e, 0x0066, 0xb1067f81},
+ {0x09ea, 0x000e, 0x46fd7247},
+ {0x036b, 0x008c, 0x1a39acdf},
+ {0x078f, 0x0030, 0x964042ab},
+ {0x085c, 0x008f, 0x1829edab},
+ {0x02ec, 0x009f, 0x6aefa72d},
+ {0x043b, 0x00ce, 0x65642ff5},
+ {0x0a32, 0x00b8, 0xbd82759e},
+ {0x0d3c, 0x0087, 0xf4d66d54},
+ {0x09ec, 0x008a, 0x06bfa1ff},
+ {0x0902, 0x0015, 0x755025d2},
+ {0x08fe, 0x000e, 0xf690ce2d},
+ {0x00fb, 0x00dc, 0xe55f1528},
+ {0x0eaa, 0x003a, 0x0fe0a8d7},
+ {0x05fb, 0x0006, 0x86281cfb},
+ {0x0dd1, 0x00a7, 0x60ab51b4},
+ {0x0005, 0x001b, 0xf51d969b},
+ {0x077c, 0x00dd, 0xc2fed268},
+ {0x0575, 0x00f5, 0x432c0b1a},
+ {0x05be, 0x0088, 0x78baa04b},
+ {0x0c89, 0x0068, 0xeda9e428},
+ {0x0f5c, 0x0068, 0xec143c76},
+ {0x06a8, 0x0009, 0xd72651ce},
+ {0x060f, 0x008e, 0x765426cd},
+ {0x07b1, 0x0047, 0x2cfcfa0c},
+ {0x04f1, 0x0041, 0x55b172f9},
+ {0x0e05, 0x00ac, 0x61efde93},
+ {0x0bf7, 0x0097, 0x05b83eee},
+ {0x04e9, 0x00f3, 0x9928223a},
+ {0x023a, 0x0005, 0xdfada9bc},
+ {0x0acb, 0x000e, 0x2217cecd},
+ {0x0148, 0x0060, 0xbc3f7405},
+ {0x0764, 0x0059, 0xcbc201b1},
+ {0x021f, 0x0059, 0x5d6b2256},
+ {0x0f1e, 0x006c, 0xdefeeb45},
+ {0x071c, 0x00b9, 0xb9b59309},
+ {0x0564, 0x0063, 0xae064271},
+ {0x0b14, 0x0044, 0xdb867d9b},
+ {0x0e5a, 0x0055, 0xff06b685},
+ {0x015e, 0x00ba, 0x1115ccbc},
+ {0x0379, 0x00e6, 0x5f4e58dd},
+ {0x013b, 0x0067, 0x4897427e},
+ {0x0e64, 0x0071, 0x7af2b7a4},
+ {0x0a11, 0x0050, 0x92105726},
+ {0x0109, 0x0055, 0xd0d000f9},
+ {0x00aa, 0x0022, 0x815d229d},
+ {0x09ac, 0x004f, 0x02f9d985},
+ {0x0e1b, 0x00ce, 0x5cf92ab4},
+ {0x08af, 0x00d8, 0x17ca72d1},
+ {0x0e33, 0x000a, 0xda2dba6b},
+ {0x0ee3, 0x006a, 0xb00048e5},
+ {0x0648, 0x001a, 0x2364b8cb},
+ {0x0315, 0x0085, 0x0596fd0d},
+ {0x0fbb, 0x003e, 0x298230ca},
+ {0x0422, 0x006a, 0x78ada4ab},
+ {0x04ba, 0x0073, 0xced1fbc2},
+ {0x007d, 0x0061, 0x4b7ff236},
+ {0x070b, 0x00d0, 0x261cf0ae},
+ {0x0c1a, 0x0035, 0x8be92ee2},
+ {0x0af8, 0x0063, 0x824dcf03},
+ {0x08f8, 0x006d, 0xd289710c},
+ {0x021b, 0x00ee, 0x6ac1c41d},
+ {0x05b5, 0x00da, 0x8e52f0e2},
+};
+
+int __init
+xfs_dahash_test(void)
+{
+ unsigned int i;
+ unsigned int errors = 0;
+
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
+ xfs_dahash_t hash;
+
+ hash = xfs_da_hashname(test_buf + test[i].start,
+ test[i].length);
+ if (hash != test[i].dahash)
+ errors++;
+ }
+
+ if (errors) {
+		printk(KERN_ERR "xfs dir/attr hash test failed %u times!\n",
+ errors);
+ return -ERANGE;
+ }
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_DAHASH_TEST_H__
+#define __XFS_DAHASH_TEST_H__
+
+int xfs_dahash_test(void);
+
+#endif /* __XFS_DAHASH_TEST_H__ */
*/
if (xfs_has_allocsize(mp))
prealloc_blocks = mp->m_allocsize_blocks;
- else
+ else if (allocfork == XFS_DATA_FORK)
prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
offset, count, &icur);
+ else
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
+ offset, count, &ccur);
if (prealloc_blocks) {
xfs_extlen_t align;
xfs_off_t end_offset;
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
+#include "xfs_dahash_test.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
xfs_check_ondisk_structs();
+ error = xfs_dahash_test();
+ if (error)
+ return error;
+
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_this_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_start_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_first_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_exact_bno);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_near_bno);
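The point of the shared helper is the 64-bit overflow handling that the old s32 end lost: a zero length or a wrapped (negative) end both mean "lock to end of file". A standalone sketch of the same boundary logic (illustrative, not part of the patch):

#include <limits.h>

/* Mirrors nlm4svc_set_file_lock_range()'s end calculation. */
static long long nlm_lock_end(unsigned long long off, unsigned long long len)
{
	long long end = off + len - 1;	/* may wrap negative for huge len */

	if (len == 0 || end < 0)
		return LLONG_MAX;	/* stand-in for OFFSET_MAX */
	return end;
}

/* nlm_lock_end(0, 0)    == LLONG_MAX (zero length: whole file) */
/* nlm_lock_end(100, 50) == 149                                 */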
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_finish);
+
TRACE_EVENT(xfs_alloc_cur_check,
TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t diff, bool new),
struct block_device *bdev = inode->i_sb->s_bdev;
unsigned int max = bdev_max_zone_append_sectors(bdev);
struct bio *bio;
- ssize_t size;
+ ssize_t size = 0;
int nr_pages;
ssize_t ret;
if (bio->bi_iter.bi_sector != wpsector) {
zonefs_warn(inode->i_sb,
"Corrupted write pointer %llu for zone at %llu\n",
- wpsector, z->z_sector);
+ bio->bi_iter.bi_sector, z->z_sector);
ret = -EIO;
}
}
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+bool acpi_quirk_skip_gpio_event_handlers(void);
#else
static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
{
*skip = false;
return 0;
}
+static inline bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+ return false;
+}
#endif
#ifdef CONFIG_PM
#define CNTHCTL_EVNTEN (1 << 2)
#define CNTHCTL_EVNTDIR (1 << 3)
#define CNTHCTL_EVNTI (0xF << 4)
+#define CNTHCTL_ECV (1 << 12)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
*
* The returned array must be allocated with kmalloc() and will be
* freed by the caller. If the allocation fails, NULL should be
- * returned. num_output_fmts must be set to the returned array size.
+ * returned. num_input_fmts must be set to the returned array size.
* Formats listed in the returned array should be listed in decreasing
* preference order (the core will try all formats until it finds one
* that works). When the format is not supported NULL should be
- * returned and num_output_fmts should be set to 0.
+ * returned and num_input_fmts should be set to 0.
*
* This method is called on all elements of the bridge chain as part of
* the bus format negotiation process that happens in
void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj));
#endif /* __DRM_GEM_H__ */
enum kvm_arch_timers {
TIMER_PTIMER,
TIMER_VTIMER,
+ NR_KVM_EL0_TIMERS,
+ TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
+ TIMER_HPTIMER,
NR_KVM_TIMERS
};
TIMER_REG_CVAL,
TIMER_REG_TVAL,
TIMER_REG_CTL,
+ TIMER_REG_VOFF,
};
struct arch_timer_offset {
* structure. If NULL, assume a zero offset.
*/
u64 *vm_offset;
+ /*
+ * If set, pointer to one of the offsets in the vcpu's sysreg
+ * array. If NULL, assume a zero offset.
+ */
+ u64 *vcpu_offset;
};
struct arch_timer_vm_data {
/* Offset applied to the virtual timer/counter */
u64 voffset;
+ /* Offset applied to the physical timer/counter */
+ u64 poffset;
+
+ /* The PPI for each timer, global to the VM */
+ u8 ppi[NR_KVM_TIMERS];
};
struct arch_timer_context {
struct kvm_vcpu *vcpu;
- /* Timer IRQ */
- struct kvm_irq_level irq;
-
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
+ u64 ns_frac;
/* Offset for this counter/timer */
struct arch_timer_offset offset;
*/
bool loaded;
+ /* Output level of the timer IRQ */
+ struct {
+ bool level;
+ } irq;
+
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
- u32 host_timer_irq_flags;
};
struct timer_map {
struct arch_timer_context *direct_vtimer;
struct arch_timer_context *direct_ptimer;
+ struct arch_timer_context *emul_vtimer;
struct arch_timer_context *emul_ptimer;
};
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+void kvm_timer_init_vm(struct kvm *kvm);
+
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
void kvm_timer_init_vhe(void);
-bool kvm_arch_timer_get_input_level(int vintid);
-
#define vcpu_timer(v) (&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
+#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
+#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data)
+#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
+
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg);
#include <asm/kvm_emulate.h>
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu);
static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
struct kvm_one_reg;
void kvm_arm_init_hypercalls(struct kvm *kvm);
+void kvm_arm_teardown_hypercalls(struct kvm *kvm);
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+
#endif
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
#include <linux/phy.h>
#if IS_ENABLED(CONFIG_ACPI_MDIO)
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
#else /* CONFIG_ACPI_MDIO */
static inline int
acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
*(listptr) = rq; \
} while (0)
+#define rq_list_add_tail(lastpptr, rq) do { \
+ (rq)->rq_next = NULL; \
+ **(lastpptr) = rq; \
+ *(lastpptr) = &rq->rq_next; \
+} while (0)
+
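rq_list_add_tail() keeps appends O(1) by threading the address of the current tail's rq_next link through the caller instead of walking the list. A hypothetical usage sketch (rq1/rq2 stand for previously allocated requests):

struct request *head = NULL;
struct request **last = &head;	/* address of the tail link */

rq_list_add_tail(&last, rq1);	/* head == rq1, last == &rq1->rq_next */
rq_list_add_tail(&last, rq2);	/* rq1->rq_next == rq2 */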
#define rq_list_pop(listptr) \
({ \
struct request *__req = NULL; \
wake_up_process(waiter);
}
-unsigned long bdev_start_io_acct(struct block_device *bdev,
- unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
- unsigned long start_time);
+ unsigned int sectors, unsigned long start_time);
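With the reworked accounting API the sector count moves from submission to completion, so partial or failed I/O is accounted by what actually transferred. A hedged usage sketch (bdev and done_sectors are illustrative names):

unsigned long start;

start = bdev_start_io_acct(bdev, REQ_OP_READ, jiffies);
/* ... issue and wait for the I/O ... */
bdev_end_io_acct(bdev, REQ_OP_READ, done_sectors, start);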
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
struct clk_hw *hws[];
};
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+ static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+ { \
+ fn(np); \
+ fwnode_dev_initialized(of_fwnode_handle(np), true); \
+ } \
+ OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
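The wrapper now marks the clock node's fwnode as initialized right after fn() runs, so the device core will not try to create another device for that node later. Declarations keep the same shape; an illustrative one (compatible string and body are hypothetical):

static void __init foo_clk_init(struct device_node *np)
{
	/* register clocks for the "vendor,foo-clk" node (hypothetical) */
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_init);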
/*
* Use this macro when you have a driver that requires two initialization
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_USER
static __always_inline int __ct_state(void)
{
return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
+ * for_each_cpu_or - iterate over every cpu present in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves having to use a temporary cpumask in many places. It is
+ * equivalent to:
+ * struct cpumask tmp;
+ * cpumask_or(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2) \
+ for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
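For example, a hypothetical walk over the CPUs present in either mask, with no scratch cpumask allocated (mask1/mask2 are assumed to be const struct cpumask pointers):

unsigned int cpu, weight = 0;

for_each_cpu_or(cpu, mask1, mask2)
	weight++;	/* counts the weight of (mask1 | mask2) */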
+/**
* cpumask_any_but - return a "random" in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
}
extern void efi_init (void);
+extern void efi_earlycon_reprobe(void);
#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#else
/* delay between mkwrite and deferred handler */
unsigned long delay;
bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */
unsigned long nbits, unsigned long start);
unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
unsigned long start);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
}
#endif
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory region
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
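For compile-time-constant sizes up to one word, the small_const_nbits() branch folds to a single OR and mask. An illustrative call (values chosen for the example):

DECLARE_BITMAP(a, 8) = { 0x10 };	/* bit 4 set */
DECLARE_BITMAP(b, 8) = { 0x02 };	/* bit 1 set */

unsigned long first = find_next_or_bit(a, b, 8, 0);	/* == 1 */
unsigned long next  = find_next_or_bit(a, b, 8, 2);	/* == 4 */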
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
(bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
(bit)++)
+#define for_each_or_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
int icc_provider_add(struct icc_provider *provider);
void icc_provider_del(struct icc_provider *provider);
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
return -ENOTSUPP;
}
+static inline void icc_provider_init(struct icc_provider *provider) { }
+
+static inline int icc_provider_register(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
static inline int icc_provider_add(struct icc_provider *provider)
{
return -ENOTSUPP;
const void *cmd;
union {
/* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd);
+ void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
/* used for polled completion */
void *cookie;
};
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *));
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned));
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- ssize_t ret2)
+ ssize_t ret2, unsigned issue_flags)
{
}
static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *))
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
}
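Callbacks now receive the issue_flags of the task-work context they run in and must forward them when completing. A hypothetical driver callback under the new prototype:

static void foo_uring_cmd_tw_cb(struct io_uring_cmd *ioucmd,
				unsigned issue_flags)
{
	/* complete with the flags of the context we run in */
	io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
}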
static inline struct sock *io_uring_get_socket(struct file *file)
/*
* Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
* This flag effectively creates a unique generation number that is used to
* mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
* i.e. may (or may not) have come from the previous memslots generation.
* use by the VM. To be used under the slots_lock (above) or in a
* kvm->srcu critical section where acquiring the slots_lock would
* lead to deadlock with the synchronize_srcu in
- * install_new_memslots.
+ * kvm_swap_active_memslots().
*/
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
* is topped up (__kvm_mmu_topup_memory_cache()).
*/
struct kvm_mmu_memory_cache {
- int nobjs;
gfp_t gfp_zero;
gfp_t gfp_custom;
struct kmem_cache *kmem_cache;
int capacity;
+ int nobjs;
void **objects;
};
#endif
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
struct nvme_tcp_term_pdu {
struct nvme_tcp_hdr hdr;
__le16 fes;
- __le32 fei;
- __u8 rsvd[8];
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
};
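Splitting fei into two __le16 halves keeps the field at its on-the-wire offset (10) rather than letting the compiler pad a __le32 to a 4-byte boundary. A hedged helper to reassemble the 32-bit Field Error Information (illustrative, not part of the patch):

static inline u32 nvme_tcp_term_pdu_fei(const struct nvme_tcp_term_pdu *pdu)
{
	/* feil carries the low 16 bits, feiu the high 16 bits */
	return le16_to_cpu(pdu->feil) | ((u32)le16_to_cpu(pdu->feiu) << 16);
}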
/**
nvme_opcode_name(nvme_cmd_compare), \
nvme_opcode_name(nvme_cmd_write_zeroes), \
nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_verify), \
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
nvme_admin_opcode_name(nvme_admin_activate_fw), \
nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_dev_self_test), \
nvme_admin_opcode_name(nvme_admin_ns_attach), \
nvme_admin_opcode_name(nvme_admin_keep_alive), \
nvme_admin_opcode_name(nvme_admin_directive_send), \
nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \
nvme_admin_opcode_name(nvme_admin_dbbuf), \
nvme_admin_opcode_name(nvme_admin_format_nvm), \
nvme_admin_opcode_name(nvme_admin_security_send), \
#if IS_ENABLED(CONFIG_OF_MDIO)
bool of_mdiobus_child_is_phy(struct device_node *child);
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
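The static inline wrappers exist so that THIS_MODULE expands in each driver's own translation unit; the core can then record the real owner module while every call site stays source-compatible. A hypothetical probe fragment:

/* THIS_MODULE (the driver) is forwarded implicitly as the bus owner */
err = of_mdiobus_register(bus, dev->of_node);
if (err)
	return err;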
+
struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
return percpu_counter_read(fbc);
}
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
- return percpu_counter_read(fbc);
-}
-
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return true;
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
- u32 addr64;
+ u32 host_dma_width;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
#ifdef CONFIG_EFI
extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(struct platform_device *pd);
+void sysfb_apply_efi_quirks(void);
+void sysfb_set_efifb_fwnode(struct platform_device *pd);
#else /* CONFIG_EFI */
-static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
{
}
struct device_node *np,
char *type, void *devdata,
const struct thermal_cooling_device_ops *ops);
+void thermal_cooling_device_update(struct thermal_cooling_device *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
extern int __traceiter_##name(data_proto); \
TP_ARGS(args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
- rcu_read_lock_sched_notrace(); \
- rcu_dereference_sched(__tracepoint_##name.funcs);\
- rcu_read_unlock_sched_notrace(); \
+ WARN_ON_ONCE(!rcu_is_watching()); \
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
+#define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)
/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
#else
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
static inline void
+xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+}
+
+static inline void
xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
}
}
#endif
+static inline void xdp_clear_features_flag(struct net_device *dev)
+{
+ xdp_set_features_flag(dev, 0);
+}
+
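A driver would typically advertise its capabilities once at registration time; a minimal sketch (the flag combination is illustrative):

	/* The helper masks the value with NETDEV_XDP_ACT_MASK and emits
	 * NETDEV_XDP_FEAT_CHANGE only if the features actually changed. */
	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC |
				      NETDEV_XDP_ACT_REDIRECT);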
#endif /* __LINUX_NET_XDP_H__ */
const char * model; /* ... after scan; point to static string */
const char * rev; /* ... "nullnullnullnull" before scan */
+#define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */
struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
* creation time */
unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
unsigned silence_suspend:1; /* Do not print runtime PM related messages */
+ unsigned no_vpd_size:1; /* No VPD size reported in header */
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
-#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
+/* do not ask for VPD page size first on some broken targets */
+#define BLIST_NO_VPD_SIZE ((__force blist_flags_t)(1ULL << 13))
#define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14))
#define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15))
#define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16))
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
- __BLIST_UNUSED_14 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
__BLIST_UNUSED_15 | \
__BLIST_UNUSED_16 | \
__BLIST_UNUSED_24 | \
__entry->align_offset = info->align_offset;
),
- TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+ TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
__entry->total_vm, __entry->flags, __entry->length,
__entry->mt = &mm->mm_mt;
),
- TP_printk("mt_mod %p, DESTROY\n",
+ TP_printk("mt_mod %p, DESTROY",
__entry->mt
)
);
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN uapi header */
__u64 nr;
__u64 args[6];
__u64 ret;
- __u32 longmode;
- __u32 pad;
+
+ union {
+#ifndef __KERNEL__
+ __u32 longmode;
+#endif
+ __u64 flags;
+ };
} hypercall;
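The anonymous union leaves the old longmode word visible to userspace builds while the kernel now reports a 64-bit flags field in the same bytes, so existing VMM binaries keep working. A userspace consumer might migrate like this (sketch; the flag constant is hypothetical, check the uapi header for the real name):

	#define HYPERCALL_LONG_MODE (1ULL << 0)	/* assumed to alias longmode */

	bool long_mode = run->hypercall.flags & HYPERCALL_LONG_MODE;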
/* KVM_EXIT_TPR_ACCESS */
struct {
#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN uapi header */
NETDEV_XDP_ACT_HW_OFFLOAD = 16,
NETDEV_XDP_ACT_RX_SG = 32,
NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+ NETDEV_XDP_ACT_MASK = 127,
};
enum {
TCA_ROOT_FLAGS,
TCA_ROOT_COUNT,
TCA_ROOT_TIME_DELTA, /* in msecs */
+ TCA_ROOT_EXT_WARN_MSG,
__TCA_ROOT_MAX,
#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
};
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+#define XENPF_get_dom0_console 64
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
struct xenpf_symdata symdata;
+ struct dom0_vga_console_info dom0_console;
uint8_t pad[128];
} u;
};
#ifdef CONFIG_BOOT_CONFIG
/* Is bootconfig on command line? */
-static bool bootconfig_found = IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE);
+static bool bootconfig_found;
static size_t initargs_offs;
#else
# define bootconfig_found false
err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
bootconfig_params);
- if (IS_ERR(err) || !bootconfig_found)
+ if (IS_ERR(err) || !(bootconfig_found || IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE)))
return;
/* parse_args() stops at the next param of '--' and returns an address */
initargs_offs = err - tmp_cmdline;
if (!data) {
- pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+ /* If user intended to use bootconfig, show an error level message */
+ if (bootconfig_found)
+ pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+ else
+ pr_info("No bootconfig data provided, so skipping bootconfig");
return;
}
unsigned long nr = ctx->file_alloc_end;
int ret;
+ if (!table->bitmap)
+ return -ENFILE;
+
do {
ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
if (ret != nr)
* completes with -EOVERFLOW, then the sender must ensure that a
* later IORING_OP_MSG_RING delivers the message.
*/
- if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
ret = -EOVERFLOW;
out_unlock:
io_double_unlock_ctx(target_ctx);
struct io_ring_ctx *ctx = req->ctx;
struct file *src_file = msg->src_file;
+ if (msg->len)
+ return -EINVAL;
if (target_ctx == ctx)
return -EINVAL;
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
struct sockaddr __user *addr;
int addr_len;
bool in_progress;
+ bool seen_econnaborted;
};
struct io_sr_msg {
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
- conn->in_progress = false;
+ conn->in_progress = conn->seen_econnaborted = false;
return 0;
}
ret = __sys_connect_file(req->file, &io->address,
connect->addr_len, file_flags);
- if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
+ if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
+ && force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
- } else {
- if (req_has_async_data(req))
- return -EAGAIN;
- if (io_alloc_async_data(req)) {
- ret = -ENOMEM;
+ return -EAGAIN;
+ }
+ if (ret == -ECONNABORTED) {
+ if (connect->seen_econnaborted)
goto out;
- }
- memcpy(req->async_data, &__io, sizeof(__io));
+ connect->seen_econnaborted = true;
+ }
+ if (req_has_async_data(req))
+ return -EAGAIN;
+ if (io_alloc_async_data(req)) {
+ ret = -ENOMEM;
+ goto out;
}
+ memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
unsigned nr, struct io_rsrc_data **pdata)
{
struct io_rsrc_data *data;
- int ret = -ENOMEM;
+ int ret = 0;
unsigned i;
data = kzalloc(sizeof(*data), GFP_KERNEL);
}
#endif
io_free_file_tables(&ctx->file_table);
+ io_file_table_set_alloc_range(ctx, 0, 0);
io_rsrc_data_free(ctx->file_data);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
}
}
if (folio) {
- folio_put_refs(folio, nr_pages - 1);
+ /*
+ * The pages are bound to the folio; this doesn't
+ * actually unpin them but drops all but one reference,
+ * which is usually put down by io_buffer_unmap().
+ * Note, needs a better helper.
+ */
+ unpin_user_pages(&pages[1], nr_pages - 1);
nr_pages = 1;
}
}
set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
else
set_cpus_allowed_ptr(current, cpu_online_mask);
- current->flags |= PF_NO_SETAFFINITY;
mutex_lock(&sqd->lock);
while (1) {
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
- ioucmd->task_work_cb(ioucmd);
+ ioucmd->task_work_cb(ioucmd, issue_flags);
}
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *))
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
*/
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
/* order with io_iopoll_req_issued() checking ->iopoll_complete */
smp_store_release(&req->iopoll_completed, 1);
else
- io_req_complete_post(req, 0);
+ io_req_complete_post(req, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
{
/* Only used as heuristic here to derive limit. */
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
PAGE_SIZE), LONG_MAX);
return 0;
}
continue;
if (type == STACK_MISC)
continue;
+ if (type == STACK_INVALID && env->allow_uninit_stack)
+ continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
continue;
if (type == STACK_ZERO)
continue;
+ if (type == STACK_INVALID && env->allow_uninit_stack)
+ continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
- if (*stype == STACK_ZERO) {
+ if ((*stype == STACK_ZERO) ||
+ (*stype == STACK_INVALID && env->allow_uninit_stack)) {
if (clobber) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
+ if (env->allow_uninit_stack &&
+ old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
+ continue;
+
/* explored stack has more populated slots than current stack
* and these slots were used
*/
if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
user_exit_irqoff();
instrumentation_begin();
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
- unsigned long ti_work = read_thread_flags();
+ unsigned long ti_work;
lockdep_assert_irqs_disabled();
/* Flush pending rcuog wakeup before the last need_resched() check */
tick_nohz_user_enter_prepare();
+ ti_work = read_thread_flags();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
/* Inherit group flags from the previous leader */
sibling->group_caps = event->group_caps;
- if (!RB_EMPTY_NODE(&event->group_node)) {
+ if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
add_event_to_groups(sibling, event->ctx);
if (sibling->state == PERF_EVENT_STATE_ACTIVE)
if (likely(!ctx->nr_events))
return;
- if (is_active ^ EVENT_TIME) {
+ if (!(is_active & EVENT_TIME)) {
/* start ctx time */
__update_context_time(ctx, false);
perf_cgroup_set_timestamp(cpuctx);
perf_event_header__init_id(&bpf_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, data, event,
+ ret = perf_output_begin(&handle, &sample, event,
bpf_event->event_id.header.size);
if (ret)
return;
for (i = 0; i < NR_MM_COUNTERS; i++) {
long x = percpu_counter_sum(&mm->rss_stat[i]);
- if (likely(!x))
- continue;
-
- /* Making sure this is not due to race with CPU offlining. */
- x = percpu_counter_sum_all(&mm->rss_stat[i]);
if (unlikely(x))
pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
mm, resident_page_types[i], x);
KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
-CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
+ if (task_on_rq_migrating(p))
+ flags |= ENQUEUE_MIGRATED;
+
enqueue_task(rq, p, flags);
p->on_rq = TASK_ON_RQ_QUEUED;
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
unsigned int retlen = min(len, cpumask_size());
- if (copy_to_user(user_mask_ptr, mask, retlen))
+ if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
ret = -EFAULT;
else
ret = retlen;
#endif
}
+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+ u64 sleep_time;
+
+ if (se->exec_start == 0)
+ return false;
+
+ cfs_rq = cfs_rq_of(se);
+
+ sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+ /* Happens while migrating because of clock task divergence */
+ if (sleep_time <= se->exec_start)
+ return false;
+
+ sleep_time -= se->exec_start;
+ if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+ return true;
+
+ return false;
+}
+
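Working the cutoff out with scale_load_down(NICE_0_LOAD) == 1024 (its 64-bit value): 2^63 / 1024 ≈ 9.0 * 10^15 ns ≈ 9.0 * 10^6 s ≈ 104 days, matching the "~ 104 days" figure in the place_entity() comment below.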
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime = cfs_rq->min_vruntime;
- u64 sleep_time;
/*
* The 'current' period is already promised to the current tasks,
/*
* Pull vruntime of the entity being placed to the base level of
- * cfs_rq, to prevent boosting it if placed backwards. If the entity
- * slept for a long time, don't even try to compare its vruntime with
- * the base as it may be too far off and the comparison may get
- * inversed due to s64 overflow.
- */
- sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
- if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
+ * cfs_rq, to prevent boosting it if placed backwards.
+ * However, min_vruntime can advance much faster than real time, with
+ * the extreme being when an entity with the minimal weight always runs
+ * on the cfs_rq. If the waking entity slept for a long time, its
+ * vruntime difference from min_vruntime may overflow s64 and their
+ * comparison may get inversed, so ignore the entity's original
+ * vruntime in that case.
+ * The maximal vruntime speedup is given by the ratio of normal to
+ * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+ * When placing a migrated waking entity, its exec_start has been set
+ * from a different rq. In order to take into account a possible
+ * divergence between new and prev rq's clock_task because of irq and
+ * stolen time, we take an additional margin.
+ * So, cutting off on the sleep time of
+ * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+ * should be safe.
+ */
+ if (entity_is_long_sleeper(se))
se->vruntime = vruntime;
else
se->vruntime = max_vruntime(se->vruntime, vruntime);
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+ /* Entity has migrated, no longer consider this task hot */
+ if (flags & ENQUEUE_MIGRATED)
+ se->exec_start = 0;
check_schedstat_required();
update_stats_enqueue_fair(cfs_rq, se, flags);
/* Tell new CPU we are migrated */
se->avg.last_update_time = 0;
- /* We have migrated, no longer consider this task hot */
- se->exec_start = 0;
-
update_scan_period(p, new_cpu);
}
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
- if (end < pg->records[0].ip ||
+ if (pg->index == 0 ||
+ end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
arch_ftrace_set_direct_caller(fregs, addr);
}
-struct ftrace_ops direct_ops = {
+static struct ftrace_ops direct_ops = {
.func = call_direct_funcs,
.flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
| FTRACE_OPS_FL_PERMANENT,
if (trace_event_file_is_valid(gen_kprobe_test))
gen_kprobe_test = NULL;
/* We got an error after creating the event, delete it */
- ret = kprobe_event_delete("gen_kprobe_test");
+ kprobe_event_delete("gen_kprobe_test");
goto out;
}
if (trace_event_file_is_valid(gen_kretprobe_test))
gen_kretprobe_test = NULL;
/* We got an error after creating the event, delete it */
- ret = kprobe_event_delete("gen_kretprobe_test");
+ kprobe_event_delete("gen_kretprobe_test");
goto out;
}
local_set(&bpage->commit, 0);
}
-/*
- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
- * this issue out.
- */
static void free_buffer_page(struct buffer_page *bpage)
{
free_page((unsigned long)bpage->page);
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
+ .read_iter = seq_read_iter,
+ .splice_read = generic_file_splice_read,
.write = tracing_write_stub,
.llseek = tracing_lseek,
.release = tracing_release,
{
const char *field_name = "";
+ if (WARN_ON_ONCE(!field))
+ return field_name;
+
if (level > 1)
return field_name;
goto out;
}
+ /* Some types cannot be a value */
+ if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+ HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+ HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+ HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+ hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+ ret = -EINVAL;
+ goto out;
+ }
+
hist_data->fields[val_idx] = hist_field;
++hist_data->n_vals;
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
- sched_setaffinity(0, current_mask);
+ set_cpus_allowed_ptr(current, current_mask);
return;
change_mode:
}
- sched_setaffinity(kthread->pid, current_mask);
+ set_cpus_allowed_ptr(kthread, current_mask);
kdata->kthread = kthread;
wake_up_process(kthread);
{
struct task_struct *kthread;
+ /* Do not start a new hwlatd thread if it is already running */
+ if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+ return 0;
+
kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
*/
cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- for_each_online_cpu(cpu)
- per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
-
for_each_cpu(cpu, current_mask) {
retval = start_cpu_kthread(cpu);
if (retval)
/*
* Per-cpu runtime information.
*/
-DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
/*
* this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
u64 count;
};
-DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
/*
* this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
/*
* Protect the interface.
*/
-struct mutex interface_lock;
+static struct mutex interface_lock;
/*
* Tracer data.
/*
* osnoise/timerlat_period: min 100 us, max 1 s
*/
-u64 timerlat_min_period = 100;
-u64 timerlat_max_period = 1000000;
+static u64 timerlat_min_period = 100;
+static u64 timerlat_max_period = 1000000;
static struct trace_min_max_param timerlat_period = {
.lock = &interface_lock,
.val = &osnoise_data.timerlat_period,
static void dhry_benchmark(void)
{
+ unsigned int cpu = get_cpu();
int i, n;
if (iterations > 0) {
}
report:
+ put_cpu();
if (n >= 0)
- pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n",
- smp_processor_id(), n, n / DHRY_VAX);
+ pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
+ n, n / DHRY_VAX);
else if (n == -EAGAIN)
pr_err("Please increase the number of iterations\n");
else
EXPORT_SYMBOL(_find_next_andnot_bit);
#endif
+#ifndef find_next_or_bit
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_or_bit);
+#endif
+
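The new primitive backs two-mask iterators such as for_each_cpu_or(), which the percpu_counter rework further down uses to sum online and dying CPUs in a single pass; a minimal sketch:

	int cpu;

	/* Visits every CPU set in either mask, with no temporary cpumask */
	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask)
		pr_debug("CPU %d is online or dying\n", cpu);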
#ifndef find_next_zero_bit
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
unsigned long start)
*/
static inline bool mas_skip_node(struct ma_state *mas)
{
- unsigned char slot, slot_count;
- unsigned long *pivots;
- enum maple_type mt;
+ if (mas_is_err(mas))
+ return false;
- mt = mte_node_type(mas->node);
- slot_count = mt_slots[mt] - 1;
do {
if (mte_is_root(mas->node)) {
- slot = mas->offset;
- if (slot > slot_count) {
+ if (mas->offset >= mas_data_end(mas)) {
mas_set_err(mas, -EBUSY);
return false;
}
} else {
mas_ascend(mas);
- slot = mas->offset;
- mt = mte_node_type(mas->node);
- slot_count = mt_slots[mt] - 1;
}
- } while (slot > slot_count);
-
- mas->offset = ++slot;
- pivots = ma_pivots(mas_mn(mas), mt);
- if (slot > 0)
- mas->min = pivots[slot - 1] + 1;
-
- if (slot <= slot_count)
- mas->max = pivots[slot];
+ } while (mas->offset >= mas_data_end(mas));
+ mas->offset++;
return true;
}
}
EXPORT_SYMBOL(percpu_counter_sync);
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
- const struct cpumask *cpu_mask)
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive().
+ *
+ * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
+ * from CPUs that are in the process of being taken offline. Dying cpus have
+ * been removed from the online mask, but may not have had the hotplug dead
+ * notifier called to fold the percpu count back into the global counter sum.
+ * By including dying CPUs in the iteration mask, we avoid this race condition
+ * so __percpu_counter_sum() just does the right thing when CPUs are being taken
+ * offline.
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
- for_each_cpu(cpu, cpu_mask) {
+ for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
-{
- return __percpu_counter_sum_mask(fbc, cpu_online_mask);
-}
EXPORT_SYMBOL(__percpu_counter_sum);
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
- return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
rcu_read_unlock();
}
+static noinline void check_empty_area_fill(struct maple_tree *mt)
+{
+ const unsigned long max = 0x25D78000;
+ unsigned long size;
+ int loop, shift;
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_set_non_kernel(99999);
+ for (shift = 12; shift <= 16; shift++) {
+ loop = 5000;
+ size = 1 << shift;
+ while (loop--) {
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
+ MT_BUG_ON(mt, mas.last != mas.index + size - 1);
+ mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
+ mas_unlock(&mas);
+ mas_reset(&mas);
+ }
+ }
+
+ /* No space left. */
+ size = 0x1000;
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
+ rcu_read_unlock();
+
+ /* Fill a depth 3 node to the maximum */
+ for (unsigned long i = 629440511; i <= 629440800; i += 6)
+ mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
+ /* Make space in the second-last depth 4 node */
+ mtree_erase(mt, 631668735);
+ /* Make space in the last depth 4 node */
+ mtree_erase(mt, 629506047);
+ mas_reset(&mas);
+ /* Search from just after the gap in the second-last depth 4 */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
+ rcu_read_unlock();
+ mt_set_non_kernel(0);
+}
+
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
check_empty_area_window(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_fill(&tree);
+ mtree_destroy(&tree);
+
#if defined(BENCH)
skip:
#endif
#include <linux/kernel.h>
-#define assert(x) WARN_ON((x))
+#define assert(x) WARN_ON(!(x))
#endif /* ZSTD_DEPS_ASSERT */
#endif /* ZSTD_DEPS_NEED_ASSERT */
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList,
- const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32* const rankVal = rankValOrigin[0];
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
- ZSTD_memcpy(dst, src, srcSize);
+ ZSTD_memmove(dst, src, srcSize);
return srcSize;
}
/* Loop on each block */
while (1) {
+ BYTE* oBlockEnd = oend;
size_t decodedSize;
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
remainingSrcSize -= ZSTD_blockHeaderSize;
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+ if (ip >= op && ip < oBlockEnd) {
+ /* We are decompressing in-place. Limit the output pointer so that we
+ * don't overwrite the block that we are currently reading. This will
+ * fail decompression if the input & output pointers aren't spaced
+ * far enough apart.
+ *
+ * This is important to set, even when the pointers are far enough
+ * apart, because ZSTD_decompressBlock_internal() can decide to store
+ * literals in the output buffer, after the block it is decompressing.
+ * Since we don't want anything to overwrite our input, we have to tell
+ * ZSTD_decompressBlock_internal to never write past ip.
+ *
+ * See ZSTD_allocateLiteralsBuffer() for reference.
+ */
+ oBlockEnd = op + (ip - op);
+ }
+
switch(blockProperties.blockType)
{
case bt_compressed:
- decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
break;
case bt_raw :
+ /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
break;
case bt_rle :
- decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
break;
case bt_reserved :
default:
accessed = false;
else
accessed = true;
- folio_put(folio);
goto out;
}
if (need_lock)
folio_unlock(folio);
- folio_put(folio);
out:
*folio_sz = folio_size(folio);
+ folio_put(folio);
return accessed;
}
folio_mark_accessed(folio);
else
folio_deactivate(folio);
- folio_put(folio);
applied += folio_nr_pages(folio);
+ folio_put(folio);
}
return applied * PAGE_SIZE;
}
{
struct mm_struct *mm = vma->vm_mm;
pgtable_t pgtable;
- pmd_t _pmd;
+ pmd_t _pmd, old_pmd;
int i;
/*
*
* See Documentation/mm/mmu_notifier.rst
*/
- pmdp_huge_clear_flush(vma, haddr, pmd);
+ old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
pte_t *pte, entry;
entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
entry = pte_mkspecial(entry);
+ if (pmd_uffd_wp(old_pmd))
+ entry = pte_mkuffd_wp(entry);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
obj-y := core.o report.o
-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
};
DEFINE_SEQ_ATTRIBUTE(objects);
-static int __init kfence_debugfs_init(void)
+static int kfence_debugfs_init(void)
{
- struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
+ struct dentry *kfence_dir;
+ if (!READ_ONCE(kfence_enabled))
+ return 0;
+
+ kfence_dir = debugfs_create_dir("kfence", NULL);
debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
return 0;
}
kfence_init_enable();
+ kfence_debugfs_init();
+
return 0;
}
mm = mm_slot->slot.mm;
mmap_read_lock(mm);
+
+ /*
+ * Exit right away if mm is exiting to avoid a lockdep issue in
+ * the maple tree.
+ */
+ if (ksm_test_exit(mm))
+ goto mm_exiting;
+
for_each_vma(vmi, vma) {
- if (ksm_test_exit(mm))
- break;
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
continue;
err = unmerge_ksm_pages(vma,
goto error;
}
+mm_exiting:
remove_trailing_rmap_items(&mm_slot->rmap_list);
mmap_read_unlock(mm);
/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
unsigned long private, struct folio *src,
- struct folio **dstp, int force, bool avoid_force_lock,
- enum migrate_mode mode, enum migrate_reason reason,
- struct list_head *ret)
+ struct folio **dstp, enum migrate_mode mode,
+ enum migrate_reason reason, struct list_head *ret)
{
struct folio *dst;
int rc = -EAGAIN;
dst->private = NULL;
if (!folio_trylock(src)) {
- if (!force || mode == MIGRATE_ASYNC)
+ if (mode == MIGRATE_ASYNC)
goto out;
/*
if (current->flags & PF_MEMALLOC)
goto out;
- /*
- * We have locked some folios and are going to wait to lock
- * this folio. To avoid a potential deadlock, let's bail
- * out and not do that. The locked folios will be moved and
- * unlocked, then we can wait to lock this folio.
- */
- if (avoid_force_lock) {
- rc = -EDEADLOCK;
- goto out;
- }
-
folio_lock(src);
}
locked = true;
rc = -EBUSY;
goto out;
}
- if (!force)
- goto out;
folio_wait_writeback(src);
}
/* Establish migration ptes */
VM_BUG_ON_FOLIO(folio_test_anon(src) &&
!folio_test_ksm(src) && !anon_vma, src);
- try_to_migrate(src, TTU_BATCH_FLUSH);
+ try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
page_was_mapped = 1;
}
* A folio that has not been unmapped will be restored to
* right list unless we want to retry.
*/
- if (rc == -EAGAIN || rc == -EDEADLOCK)
+ if (rc == -EAGAIN)
ret = NULL;
migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
#define NR_MAX_BATCHED_MIGRATION 512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY 10
+#define NR_MAX_MIGRATE_ASYNC_RETRY 3
+#define NR_MAX_MIGRATE_SYNC_RETRY \
+ (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
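In other words, a synchronous caller's total budget of NR_MAX_MIGRATE_PAGES_RETRY = 10 passes is split into 3 batched asynchronous passes plus 10 - 3 = 7 one-by-one synchronous passes.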
struct migrate_pages_stats {
int nr_succeeded; /* Normal and large folios migrated successfully, in
/*
* migrate_pages_batch() first unmaps folios in the from list as many as
* possible, then move the unmapped folios.
+ *
+ * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on
+ * a lock or bit while holding the lock on more than one folio, which may
+ * cause a deadlock (e.g., for the loop device). So, if mode !=
+ * MIGRATE_ASYNC, the length of the from list must be <= 1.
*/
static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
enum migrate_mode mode, int reason, struct list_head *ret_folios,
- struct migrate_pages_stats *stats)
+ struct list_head *split_folios, struct migrate_pages_stats *stats,
+ int nr_pass)
{
- int retry;
+ int retry = 1;
int large_retry = 1;
int thp_retry = 1;
int nr_failed = 0;
bool is_large = false;
bool is_thp = false;
struct folio *folio, *folio2, *dst = NULL, *dst2;
- int rc, rc_saved, nr_pages;
- LIST_HEAD(split_folios);
+ int rc, rc_saved = 0, nr_pages;
LIST_HEAD(unmap_folios);
LIST_HEAD(dst_folios);
bool nosplit = (reason == MR_NUMA_MISPLACED);
- bool no_split_folio_counting = false;
- bool avoid_force_lock;
-retry:
- rc_saved = 0;
- avoid_force_lock = false;
- retry = 1;
- for (pass = 0;
- pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
- pass++) {
+ VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
+ !list_empty(from) && !list_is_singular(from));
+
+ for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
retry = 0;
large_retry = 0;
thp_retry = 0;
if (!thp_migration_supported() && is_thp) {
nr_large_failed++;
stats->nr_thp_failed++;
- if (!try_split_folio(folio, &split_folios)) {
+ if (!try_split_folio(folio, split_folios)) {
stats->nr_thp_split++;
continue;
}
}
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
- folio, &dst, pass > 2, avoid_force_lock,
- mode, reason, ret_folios);
+ folio, &dst, mode, reason, ret_folios);
/*
* The rules are:
* Success: folio will be freed
* Unmap: folio will be put on unmap_folios list,
* dst folio put on dst_folios list
* -EAGAIN: stay on the from list
- * -EDEADLOCK: stay on the from list
* -ENOMEM: stay on the from list
* Other errno: put on ret_folios list
*/
stats->nr_thp_failed += is_thp;
/* Large folio NUMA faulting doesn't split to retry. */
if (!nosplit) {
- int ret = try_split_folio(folio, &split_folios);
+ int ret = try_split_folio(folio, split_folios);
if (!ret) {
stats->nr_thp_split += is_thp;
break;
}
}
- } else if (!no_split_folio_counting) {
+ } else {
nr_failed++;
}
stats->nr_failed_pages += nr_pages + nr_retry_pages;
- /*
- * There might be some split folios of fail-to-migrate large
- * folios left in split_folios list. Move them to ret_folios
- * list so that they could be put back to the right list by
- * the caller otherwise the folio refcnt will be leaked.
- */
- list_splice_init(&split_folios, ret_folios);
/* nr_failed isn't updated for not used */
nr_large_failed += large_retry;
stats->nr_thp_failed += thp_retry;
goto out;
else
goto move;
- case -EDEADLOCK:
- /*
- * The folio cannot be locked for potential deadlock.
- * Go move (and unlock) all locked folios. Then we can
- * try again.
- */
- rc_saved = rc;
- goto move;
case -EAGAIN:
if (is_large) {
large_retry++;
thp_retry += is_thp;
- } else if (!no_split_folio_counting) {
+ } else {
retry++;
}
nr_retry_pages += nr_pages;
stats->nr_thp_succeeded += is_thp;
break;
case MIGRATEPAGE_UNMAP:
- /*
- * We have locked some folios, don't force lock
- * to avoid deadlock.
- */
- avoid_force_lock = true;
list_move_tail(&folio->lru, &unmap_folios);
list_add_tail(&dst->lru, &dst_folios);
break;
if (is_large) {
nr_large_failed++;
stats->nr_thp_failed += is_thp;
- } else if (!no_split_folio_counting) {
+ } else {
nr_failed++;
}
try_to_unmap_flush();
retry = 1;
- for (pass = 0;
- pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
- pass++) {
+ for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
retry = 0;
large_retry = 0;
thp_retry = 0;
if (is_large) {
large_retry++;
thp_retry += is_thp;
- } else if (!no_split_folio_counting) {
+ } else {
retry++;
}
nr_retry_pages += nr_pages;
if (is_large) {
nr_large_failed++;
stats->nr_thp_failed += is_thp;
- } else if (!no_split_folio_counting) {
+ } else {
nr_failed++;
}
dst2 = list_next_entry(dst, lru);
}
- /*
- * Try to migrate split folios of fail-to-migrate large folios, no
- * nr_failed counting in this round, since all split folios of a
- * large folio is counted as 1 failure in the first round.
- */
- if (rc >= 0 && !list_empty(&split_folios)) {
- /*
- * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
- * retries) to ret_folios to avoid migrating them again.
- */
- list_splice_init(from, ret_folios);
- list_splice_init(&split_folios, from);
- no_split_folio_counting = true;
- goto retry;
- }
+ return rc;
+}
+static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
+ free_page_t put_new_page, unsigned long private,
+ enum migrate_mode mode, int reason, struct list_head *ret_folios,
+ struct list_head *split_folios, struct migrate_pages_stats *stats)
+{
+ int rc, nr_failed = 0;
+ LIST_HEAD(folios);
+ struct migrate_pages_stats astats;
+
+ memset(&astats, 0, sizeof(astats));
+ /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
+ rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
+ reason, &folios, split_folios, &astats,
+ NR_MAX_MIGRATE_ASYNC_RETRY);
+ stats->nr_succeeded += astats.nr_succeeded;
+ stats->nr_thp_succeeded += astats.nr_thp_succeeded;
+ stats->nr_thp_split += astats.nr_thp_split;
+ if (rc < 0) {
+ stats->nr_failed_pages += astats.nr_failed_pages;
+ stats->nr_thp_failed += astats.nr_thp_failed;
+ list_splice_tail(&folios, ret_folios);
+ return rc;
+ }
+ stats->nr_thp_failed += astats.nr_thp_split;
+ nr_failed += astats.nr_thp_split;
/*
- * We have unlocked all locked folios, so we can force lock now, let's
- * try again.
+ * Fall back to migrate all failed folios one by one synchronously. All
+ * failed folios except split THPs will be retried, so their failure
+ * isn't counted.
*/
- if (rc == -EDEADLOCK)
- goto retry;
+ list_splice_tail_init(&folios, from);
+ while (!list_empty(from)) {
+ list_move(from->next, &folios);
+ rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
+ private, mode, reason, ret_folios,
+ split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
+ list_splice_tail_init(&folios, ret_folios);
+ if (rc < 0)
+ return rc;
+ nr_failed += rc;
+ }
- return rc;
+ return nr_failed;
}
/*
struct folio *folio, *folio2;
LIST_HEAD(folios);
LIST_HEAD(ret_folios);
+ LIST_HEAD(split_folios);
struct migrate_pages_stats stats;
trace_mm_migrate_pages_start(mode, reason);
mode, reason, &stats, &ret_folios);
if (rc_gather < 0)
goto out;
+
again:
nr_pages = 0;
list_for_each_entry_safe(folio, folio2, from, lru) {
}
nr_pages += folio_nr_pages(folio);
- if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+ if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
break;
}
- if (nr_pages > NR_MAX_BATCHED_MIGRATION)
- list_cut_before(&folios, from, &folio->lru);
+ if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
+ list_cut_before(&folios, from, &folio2->lru);
else
list_splice_init(from, &folios);
- rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
- mode, reason, &ret_folios, &stats);
+ if (mode == MIGRATE_ASYNC)
+ rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
+ mode, reason, &ret_folios, &split_folios, &stats,
+ NR_MAX_MIGRATE_PAGES_RETRY);
+ else
+ rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
+ mode, reason, &ret_folios, &split_folios, &stats);
list_splice_tail_init(&folios, &ret_folios);
if (rc < 0) {
rc_gather = rc;
+ list_splice_tail(&split_folios, &ret_folios);
goto out;
}
+ if (!list_empty(&split_folios)) {
+ /*
+ * Failure isn't counted since all split folios of a large folio
+ * are counted as 1 failure already. And we only try to migrate
+ * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
+ */
+ migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
+ MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
+ list_splice_tail_init(&split_folios, &ret_folios);
+ }
rc_gather += rc;
if (!list_empty(from))
goto again;
* Hugepages under user process are always in RAM and never
* swapped out, but theoretically it needs to be checked.
*/
- present = pte && !huge_pte_none(huge_ptep_get(pte));
+ present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
for (; addr != end; vec++, addr += PAGE_SIZE)
*vec = present;
walk->private = vec;
if (map_deny_write_exec(vma, vma->vm_flags)) {
error = -EACCES;
- if (file)
- goto close_and_free_vma;
- else if (vma->vm_file)
- goto unmap_and_free_vma;
- else
- goto free_vma;
+ goto close_and_free_vma;
}
/* Allow architectures to sanity-check the vm_flags */
if (map_deny_write_exec(vma, newflags)) {
error = -EACCES;
- goto out;
+ break;
}
/* Allow architectures to sanity-check the new flags */
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
+ bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
bool init = want_init_on_free();
VM_BUG_ON_PAGE(PageTail(page), page);
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
- if (!should_skip_kasan_poison(page, fpi_flags)) {
+ if (!skip_kasan_poison) {
kasan_poison_pages(page, order, init);
/* Memory is already initialized if KASAN did it internally. */
return 0;
}
-#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
/*
* Allocates and initializes node for a node on each slab cache, used for
* either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
unsigned int order, unsigned int nr_pages, struct page **pages)
{
unsigned int nr_allocated = 0;
+ gfp_t alloc_gfp = gfp;
+ bool nofail = false;
struct page *page;
int i;
* more permissive.
*/
if (!order) {
+ /* the bulk allocator doesn't officially support nofail requests */
gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
while (nr_allocated < nr_pages) {
if (nr != nr_pages_request)
break;
}
+ } else if (gfp & __GFP_NOFAIL) {
+ /*
+ * Higher order nofail allocations are really expensive and
+ * potentially dangerous (premature OOM, disruptive reclaim,
+ * compaction, etc.).
+ */
+ alloc_gfp &= ~__GFP_NOFAIL;
+ nofail = true;
}
/* High-order pages or fallback path if "bulk" fails. */
-
while (nr_allocated < nr_pages) {
if (fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
- page = alloc_pages(gfp, order);
+ page = alloc_pages(alloc_gfp, order);
else
- page = alloc_pages_node(nid, gfp, order);
- if (unlikely(!page))
- break;
+ page = alloc_pages_node(nid, alloc_gfp, order);
+ if (unlikely(!page)) {
+ if (!nofail)
+ break;
+
+ /* fall back to zero-order allocations */
+ alloc_gfp |= __GFP_NOFAIL;
+ order = 0;
+ continue;
+ }
+
/*
* Higher order allocations must be able to be treated as
* indepdenent small pages by callers (as they can with
return -ENXIO;
}
- if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
- hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
- hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
- hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_EVENT_PKT:
+ break;
+ case HCI_ACLDATA_PKT:
+ /* Detect if ISO packet has been sent as ACL */
+ if (hci_conn_num(hdev, ISO_LINK)) {
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+ __u8 type;
+
+ type = hci_conn_lookup_type(hdev, hci_handle(handle));
+ if (type == ISO_LINK)
+ hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+ }
+ break;
+ case HCI_SCODATA_PKT:
+ break;
+ case HCI_ISODATA_PKT:
+ break;
+ default:
kfree_skb(skb);
return -EINVAL;
}
cancel_work_sync(&hdev->cmd_sync_work);
cancel_work_sync(&hdev->reenable_adv_work);
+ mutex_lock(&hdev->cmd_sync_work_lock);
list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
if (entry->destroy)
entry->destroy(hdev, entry->data, -ECANCELED);
list_del(&entry->list);
kfree(entry);
}
+ mutex_unlock(&hdev->cmd_sync_work_lock);
}
void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
return err;
}
+static int hci_pause_addr_resolution(struct hci_dev *hdev)
+{
+ int err;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+ return 0;
+
+ /* Cannot disable addr resolution if scanning is enabled or
+ * when initiating an LE connection.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+ hci_lookup_le_connect(hdev)) {
+ bt_dev_err(hdev, "Command not allowed when scan/LE connect");
+ return -EPERM;
+ }
+
+ /* Cannot disable addr resolution if advertising is enabled. */
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Pause advertising failed: %d", err);
+ return err;
+ }
+
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err)
+ bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+ err);
+
+ /* Return if address resolution is disabled and RPA is not used. */
+ if (!err && scan_use_rpa(hdev))
+ return err;
+
+ hci_resume_advertising_sync(hdev);
+ return err;
+}
+
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
bool extended, struct sock *sk)
{
u8 filter_policy;
int err;
- /* Pause advertising if resolving list can be used as controllers are
+ /* Pause advertising if resolving list can be used as controllers
* cannot accept resolving list modifications while advertising.
*/
if (use_ll_privacy(hdev)) {
HCI_INIT(hci_read_flow_control_mode_sync),
/* HCI_OP_READ_LOCATION_DATA */
HCI_INIT(hci_read_location_data_sync),
+ {}
};
static int hci_init1_sync(struct hci_dev *hdev)
static const struct hci_init_stage amp_init2[] = {
/* HCI_OP_READ_LOCAL_FEATURES */
HCI_INIT(hci_read_local_features_sync),
+ {}
};
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
cancel_interleave_scan(hdev);
- /* Pause advertising since active scanning disables address resolution
- * which advertising depend on in order to generate its RPAs.
- */
- if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {
- err = hci_pause_advertising_sync(hdev);
- if (err) {
- bt_dev_err(hdev, "pause advertising failed: %d", err);
- goto failed;
- }
- }
-
- /* Disable address resolution while doing active scanning since the
- * accept list shall not be used and all reports shall reach the host
- * anyway.
+ /* Pause address resolution for active scan and stop advertising if
+ * privacy is enabled.
*/
- err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
- if (err) {
- bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
- err);
+ err = hci_pause_addr_resolution(hdev);
+ if (err)
goto failed;
- }
/* All active scans will be done with either a resolvable private
* address (when privacy feature has been enabled) or non-resolvable
void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
struct iso_conn *conn = hcon->iso_data;
- struct hci_iso_data_hdr *hdr;
__u16 pb, ts, len;
if (!conn)
}
if (ts) {
+ struct hci_iso_ts_data_hdr *hdr;
+
/* TODO: add timestamp to the packet? */
hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE);
if (!hdr) {
goto drop;
}
+ len = __le16_to_cpu(hdr->slen);
} else {
+ struct hci_iso_data_hdr *hdr;
+
hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE);
if (!hdr) {
BT_ERR("Frame is too short (len %d)", skb->len);
goto drop;
}
+
+ len = __le16_to_cpu(hdr->slen);
}
- len = __le16_to_cpu(hdr->slen);
flags = hci_iso_data_flags(len);
len = hci_iso_data_len(len);
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
+static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
+ l2cap_chan_func_t func, void *data)
+{
+ struct l2cap_chan *chan, *l;
+
+ list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ if (chan->ident == id)
+ func(chan, data);
+ }
+}
+
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data)
{
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
- struct l2cap_conn *conn = chan->conn;
- struct l2cap_ecred_conn_rsp rsp;
- u16 result;
-
- if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
- result = L2CAP_CR_LE_AUTHORIZATION;
- else
- result = L2CAP_CR_LE_BAD_PSM;
-
l2cap_state_change(chan, BT_DISCONN);
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
- &rsp);
+ __l2cap_ecred_conn_rsp_defer(chan);
}
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
break;
case L2CAP_MODE_EXT_FLOWCTL:
l2cap_chan_ecred_connect_reject(chan);
- break;
+ return;
}
}
}
&rsp);
}
-void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
+ int *result = data;
+
+ if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ switch (chan->state) {
+ case BT_CONNECT2:
+ /* If channel still pending accept add to result */
+ (*result)++;
+ return;
+ case BT_CONNECTED:
+ return;
+ default:
+ /* If not connected or pending accept, it has been refused */
+ *result = -ECONNREFUSED;
+ return;
+ }
+}
+
+struct l2cap_ecred_rsp_data {
struct {
struct l2cap_ecred_conn_rsp rsp;
- __le16 dcid[5];
+ __le16 scid[L2CAP_ECRED_MAX_CID];
} __packed pdu;
+ int count;
+};
+
+static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_ecred_rsp_data *rsp = data;
+
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ /* Reset ident so only one response is sent */
+ chan->ident = 0;
+
+ /* Include all channels pending with the same ident */
+ if (!rsp->pdu.rsp.result)
+ rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
+ else
+ l2cap_chan_del(chan, ECONNRESET);
+}
+
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+{
struct l2cap_conn *conn = chan->conn;
- u16 ident = chan->ident;
- int i = 0;
+ struct l2cap_ecred_rsp_data data;
+ u16 id = chan->ident;
+ int result = 0;
- if (!ident)
+ if (!id)
return;
- BT_DBG("chan %p ident %d", chan, ident);
+ BT_DBG("chan %p id %d", chan, id);
- pdu.rsp.mtu = cpu_to_le16(chan->imtu);
- pdu.rsp.mps = cpu_to_le16(chan->mps);
- pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
- pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+ memset(&data, 0, sizeof(data));
- mutex_lock(&conn->chan_lock);
+ data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
+ data.pdu.rsp.mps = cpu_to_le16(chan->mps);
+ data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+ data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
- list_for_each_entry(chan, &conn->chan_l, list) {
- if (chan->ident != ident)
- continue;
+ /* Verify that all channels are ready */
+ __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
- /* Reset ident so only one response is sent */
- chan->ident = 0;
+ if (result > 0)
+ return;
- /* Include all channels pending with the same ident */
- pdu.dcid[i++] = cpu_to_le16(chan->scid);
- }
+ if (result < 0)
+ data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
- mutex_unlock(&conn->chan_lock);
+ /* Build response */
+ __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
- l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
- sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
+ l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
+ sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
+ &data.pdu);
}
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
chan->ident = cmd->ident;
+ chan->mode = L2CAP_MODE_EXT_FLOWCTL;
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
l2cap_state_change(chan, BT_CONNECT2);
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_INDEX);
- /* Changes can only be made when controller is powered down */
- if (hdev_is_powered(hdev))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_REJECTED);
-
/* Parameters are limited to a single octet */
if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
return mgmt_cmd_status(sk, hdev->id,
{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
HCI_MGMT_VAR_LEN },
{ add_adv_patterns_monitor_rssi,
- MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
+ MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
+ HCI_MGMT_VAR_LEN },
{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
HCI_MGMT_VAR_LEN },
{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN kernel source */
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN kernel header */
* @ctx: XDP context pointer.
* @timestamp: Return value pointer.
*
- * Returns 0 on success or ``-errno`` on error.
+ * Return:
+ * * Returns 0 on success or ``-errno`` on error.
+ * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
+ * * ``-ENODATA`` : means no RX-timestamp available for this frame
*/
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
* @ctx: XDP context pointer.
* @hash: Return value pointer.
*
- * Returns 0 on success or ``-errno`` on error.
+ * Return:
+ * * Returns 0 on success or ``-errno`` on error.
+ * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
+ * * ``-ENODATA`` : means no RX-hash available for this frame
*/
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
}
late_initcall(xdp_metadata_init);
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+ val &= NETDEV_XDP_ACT_MASK;
+ if (dev->xdp_features == val)
+ return;
+
+ dev->xdp_features = val;
+
+ if (dev->reg_state == NETREG_REGISTERED)
+ call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_set_features_flag);
+
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
- dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- if (support_sg)
- dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+ xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);
- call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+ if (support_sg)
+ val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+ xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
void xdp_features_clear_redirect_target(struct net_device *dev)
{
- dev->xdp_features &= ~(NETDEV_XDP_ACT_NDO_XMIT |
- NETDEV_XDP_ACT_NDO_XMIT_SG);
- call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+ xdp_features_t val = dev->xdp_features;
+
+ val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
+ xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
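/* Editorial sketch, not part of the patch: a driver can now publish its
 * whole XDP capability mask in one call and let the helper suppress
 * redundant NETDEV_XDP_FEAT_CHANGE notifications. The function name below
 * is hypothetical; the flag bits and xdp_set_features_flag() are the ones
 * introduced above.
 */
static void foo_publish_xdp_features(struct net_device *dev)
{
	xdp_features_t val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* no notifier fires when dev->xdp_features already equals val */
	xdp_set_features_flag(dev, val);
}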
int new_master_mtu;
int old_master_mtu;
int mtu_limit;
+ int overhead;
int cpu_mtu;
int err;
largest_mtu = slave_mtu;
}
- mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+ overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+ mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
old_master_mtu = master->mtu;
- new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+ new_master_mtu = largest_mtu + overhead;
if (new_master_mtu > mtu_limit)
return -ERANGE;
out_port_failed:
if (new_master_mtu != old_master_mtu)
- dsa_port_mtu_change(cpu_dp, old_master_mtu -
- dsa_tag_protocol_overhead(cpu_dp->tag_ops));
+ dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
dev_set_mtu(master, old_master_mtu);
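/* Worked example (editorial, not in the patch): with a 4-byte tag
 * protocol and a user MTU of 1500 on the slave, new_master_mtu is 1504.
 * The old limit was min(master->max_mtu, dev->max_mtu), so master and
 * slave both capped at 1500 spuriously hit -ERANGE; adding the tagging
 * overhead to dev->max_mtu compares both sides in master-frame units,
 * and caching 'overhead' keeps the three call sites consistent.
 */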
skb = nskb;
}
- dev_sw_netstats_rx_add(skb->dev, skb->len);
+ dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/slab.h>
static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
+ int len = BRCM_LEG_TAG_LEN;
int source_port;
u8 *brcm_tag;
if (!skb->dev)
return NULL;
+ /* VLAN tag is added by BCM63xx internal switch */
+ if (netdev_uses_dsa(skb->dev))
+ len += VLAN_HLEN;
+
/* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
+ skb_pull_rcsum(skb, len);
dsa_default_offload_fwd_mark(skb);
- dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
+ dsa_strip_etype_header(skb, len);
return skb;
}
node_dst = find_node_by_addr_A(&port->hsr->node_db,
eth_hdr(skb)->h_dest);
if (!node_dst) {
- if (net_ratelimit())
+ if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
netdev_err(skb->dev, "%s: Unknown node\n", __func__);
return;
}
cfg->fc_scope = RT_SCOPE_UNIVERSE;
}
+ if (!cfg->fc_table)
+ cfg->fc_table = RT_TABLE_MAIN;
+
if (cmd == SIOCDELRT)
return 0;
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN kernel source */
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN kernel header */
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr addr_any = {};
- if (sk->sk_family != tb->family)
+ if (sk->sk_family != tb->family) {
+ if (sk->sk_family == AF_INET)
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev &&
+ ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
+
return false;
+ }
if (sk->sk_family == AF_INET6)
return net_eq(ib2_net(tb), net) && tb->port == port &&
truncate = true;
}
- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
int thoff;
if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
- if (headroom > dev->needed_headroom)
- dev->needed_headroom = headroom;
+ if (headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, headroom);
- if (skb_cow_head(skb, dev->needed_headroom)) {
+ if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
ip_rt_put(rt);
goto tx_dropped;
}
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
+ if (max_headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, max_headroom);
- if (skb_cow_head(skb, dev->needed_headroom)) {
+ if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
ip_rt_put(rt);
DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
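/* Editorial note: needed_headroom is updated from the xmit path with no
 * lock held, so concurrent transmits race on it by design. A minimal
 * sketch of the annotation pattern applied above (hypothetical helper
 * name):
 */
static inline void headroom_hint_update(struct net_device *dev,
					unsigned short headroom)
{
	/* paired READ_ONCE()/WRITE_ONCE() mark the data race as intentional */
	if (headroom > READ_ONCE(dev->needed_headroom))
		WRITE_ONCE(dev->needed_headroom, headroom);
}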
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
tcp_options_write(th, NULL, &opts);
th->doff = (tcp_header_size >> 2);
- __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
truncate = true;
}
- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
int thoff;
if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
*/
max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ dst->header_len + t->hlen;
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
+ if (max_headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, max_headroom);
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
u16 ippathid;
u8 ipflags1;
u8 iptype;
- u32 res2[8];
+ u32 res2[9];
};
struct iucv_irq_list {
if (!sband)
return -EINVAL;
+ if (params->basic_rates) {
+ if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+ wiphy->bands[sband->band],
+ params->basic_rates,
+ params->basic_rates_len,
+ &link->conf->basic_rates))
+ return -EINVAL;
+ changed |= BSS_CHANGED_BASIC_RATES;
+ ieee80211_check_rate_mask(link);
+ }
+
if (params->use_cts_prot >= 0) {
link->conf->use_cts_prot = params->use_cts_prot;
changed |= BSS_CHANGED_ERP_CTS_PROT;
changed |= BSS_CHANGED_ERP_SLOT;
}
- if (params->basic_rates) {
- ieee80211_parse_bitrates(link->conf->chandef.width,
- wiphy->bands[sband->band],
- params->basic_rates,
- params->basic_rates_len,
- &link->conf->basic_rates);
- changed |= BSS_CHANGED_BASIC_RATES;
- ieee80211_check_rate_mask(link);
- }
-
if (params->ap_isolate >= 0) {
if (params->ap_isolate)
sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
struct list_head active_txqs[IEEE80211_NUM_ACS];
u16 schedule_round[IEEE80211_NUM_ACS];
+ /* serializes ieee80211_handle_wake_tx_queue */
+ spinlock_t handle_wake_tx_queue_lock;
+
u16 airtime_flags;
u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
local->aql_threshold = IEEE80211_AQL_THRESHOLD;
atomic_set(&local->aql_total_pending_airtime, 0);
+ spin_lock_init(&local->handle_wake_tx_queue_lock);
+
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
return RX_DROP_MONITOR;
- /* Frame has reached destination. Don't forward */
- if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
- goto rx_accept;
-
- if (!ifmsh->mshcfg.dot11MeshForwarding) {
- if (is_multicast_ether_addr(eth->h_dest))
- goto rx_accept;
-
- return RX_DROP_MONITOR;
- }
-
/* forward packet */
if (sdata->crypto_tx_tailroom_needed_cnt)
tailroom = IEEE80211_ENCRYPT_TAILROOM;
rcu_read_unlock();
}
+ /* Frame has reached destination. Don't forward */
+ if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
+ goto rx_accept;
+
+ if (!ifmsh->mshcfg.dot11MeshForwarding) {
+ if (is_multicast_ether_addr(eth->h_dest))
+ goto rx_accept;
+
+ return RX_DROP_MONITOR;
+ }
+
skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
struct ieee80211_txq *queue;
+ spin_lock(&local->handle_wake_tx_queue_lock);
+
/* Use ieee80211_next_txq() for airtime fairness accounting */
ieee80211_txq_schedule_start(hw, txq->ac);
while ((queue = ieee80211_next_txq(hw, txq->ac))) {
ieee80211_return_txq(hw, queue, false);
}
ieee80211_txq_schedule_end(hw, txq->ac);
+ spin_unlock(&local->handle_wake_tx_queue_lock);
}
EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
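/* Editorial note: ieee80211_txq_schedule_start()/ieee80211_next_txq()
 * keep per-AC scheduling state in 'local', so concurrent calls into
 * ieee80211_handle_wake_tx_queue (e.g. from different driver queues)
 * must not interleave; the new per-local spinlock serializes the whole
 * schedule/return loop above.
 */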
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
+ const struct ethhdr *eth = (void *)skb->data;
struct mac80211_qos_map *qos_map;
bool qos;
skb_get_hash(skb);
/* all mesh/ocb stations are required to support WME */
- if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- sdata->vif.type == NL80211_IFTYPE_OCB))
+ if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+ !is_multicast_ether_addr(eth->h_dest)) ||
+ (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
qos = true;
else if (sta)
qos = sta->sta.wme;
return ret;
}
+static struct lock_class_key mptcp_slock_keys[2];
+static struct lock_class_key mptcp_keys[2];
+
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
struct mptcp_pm_addr_entry *entry)
{
+ bool is_ipv6 = sk->sk_family == AF_INET6;
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct socket *ssock;
if (!newsk)
return -EINVAL;
+ /* The subflow socket lock is acquired nested to the msk one
+ * in several places, even by the TCP stack, and this msk is a kernel
+ * socket: lockdep complains. Instead of propagating the _nested
+ * modifiers in several places, re-init the lock class for the msk
+ * socket to an mptcp-specific one.
+ */
+ sock_lock_init_class_and_name(newsk,
+ is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
+ &mptcp_slock_keys[is_ipv6],
+ is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
+ &mptcp_keys[is_ipv6]);
+
lock_sock(newsk);
ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
release_sock(newsk);
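/* Editorial note: lockdep keys must live in static storage, hence the
 * two static key pairs above, indexed by address family, so IPv4 and
 * IPv6 in-kernel msk sockets each get a distinct, mptcp-specific lock
 * class instead of the generic kernel-socket class that produced the
 * false-positive lockdep report.
 */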
if (sk->sk_socket && !ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
- mptcp_propagate_sndbuf((struct sock *)msk, ssk);
mptcp_sockopt_sync_locked(msk, ssk);
return true;
}
goto out;
}
- sock_orphan(ssk);
subflow->disposable = 1;
/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
* reference owned by msk;
*/
if (!inet_csk(ssk)->icsk_ulp_ops) {
+ WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
kfree_rcu(subflow, rcu);
+ } else if (msk->in_accept_queue && msk->first == ssk) {
+ /* if the first subflow moved to a close state, e.g. due to an
+ * incoming reset, and we reach here before inet_child_forget(),
+ * the TCP stack could later try to close it via
+ * inet_csk_listen_stop(), or deliver it to user space via
+ * accept().
+ * We can't delete the subflow - or risk a double free - nor let
+ * the msk survive - or it will be leaked in the non-accept scenario:
+ * fall back and let TCP cope with the subflow cleanup.
+ */
+ WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
+ mptcp_subflow_drop_ctx(ssk);
} else {
/* otherwise tcp will dispose of the ssk and subflow ctx */
- if (ssk->sk_state == TCP_LISTEN) {
- tcp_set_state(ssk, TCP_CLOSE);
- mptcp_subflow_queue_clean(sk, ssk);
- inet_csk_listen_stop(ssk);
+ if (ssk->sk_state == TCP_LISTEN)
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
- }
+
__tcp_close(ssk, 0);
/* close acquired an extra ref */
return 0;
}
-static void __mptcp_close_subflow(struct mptcp_sock *msk)
+static void __mptcp_close_subflow(struct sock *sk)
{
struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_sock *msk = mptcp_sk(sk);
might_sleep();
if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
continue;
- mptcp_close_ssk((struct sock *)msk, ssk, subflow);
+ mptcp_close_ssk(sk, ssk, subflow);
+ }
+
+ /* if the MPC subflow has been closed before the msk is accepted,
+ * the msk will never be accepted; close it now
+ */
+ if (!msk->first && msk->in_accept_queue) {
+ sock_set_flag(sk, SOCK_DEAD);
+ inet_sk_state_store(sk, TCP_CLOSE);
}
}
__mptcp_check_send_data_fin(sk);
mptcp_check_data_fin(sk);
+ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+ __mptcp_close_subflow(sk);
+
/* There is no point in keeping around an orphaned sk timedout or
* closed, but we need the msk around to reply to incoming DATA_FIN,
* even if it is orphaned and in FIN_WAIT2 state
}
}
- if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
- __mptcp_close_subflow(msk);
-
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);
msk->local_key = subflow_req->local_key;
msk->token = subflow_req->token;
msk->subflow = NULL;
+ msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
security_inet_csk_clone(nsk, req);
bh_unlock_sock(nsk);
- /* keep a single reference */
- __sock_put(nsk);
+ /* note: the newly allocated socket refcount is 2 now */
return nsk;
}
goto out;
}
- /* acquire the 2nd reference for the owning socket */
- sock_hold(new_mptcp_sock);
newsk = new_mptcp_sock;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
} else {
struct sock *newsk = newsock->sk;
set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
+ msk->in_accept_queue = 0;
lock_sock(newsk);
- /* PM/worker can now acquire the first subflow socket
- * lock without racing with listener queue cleanup,
- * we can notify it, if needed.
- *
- * Even if remote has reset the initial subflow by now
- * the refcnt is still at least one.
- */
- subflow = mptcp_subflow_ctx(msk->first);
- list_add(&subflow->node, &msk->conn_list);
- sock_hold(msk->first);
- if (mptcp_is_fully_established(newsk))
- mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
-
- mptcp_rcv_space_init(msk, msk->first);
- mptcp_propagate_sndbuf(newsk, msk->first);
-
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/
u8 recvmsg_inq:1,
cork:1,
nodelay:1,
- fastopening:1;
+ fastopening:1,
+ in_accept_queue:1;
int connect_flags;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct mptcp_subflow_context *subflow);
void __mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
bool __mptcp_close(struct sock *sk, long timeout);
bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
+void mptcp_subflow_drop_ctx(struct sock *ssk);
+
static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
struct mptcp_subflow_context *ctx)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
+ /* mptcp_mp_fail_no_response() can reach here on an already closed
+ * socket
+ */
+ if (ssk->sk_state == TCP_CLOSE)
+ return;
+
/* must hold: tcp_done() could drop last reference on parent */
sock_hold(sk);
- tcp_set_state(ssk, TCP_CLOSE);
tcp_send_active_reset(ssk, GFP_ATOMIC);
tcp_done(ssk);
if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
-static struct proto tcpv6_prot_override;
+static struct proto tcpv6_prot_override __ro_after_init;
static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
static void mptcp_force_close(struct sock *sk)
{
- /* the msk is not yet exposed to user-space */
+ /* the msk is not yet exposed to user-space, and refcount is 2 */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
+ sock_put(sk);
}
static void subflow_ulp_fallback(struct sock *sk,
mptcp_subflow_ops_undo_override(sk);
}
-static void subflow_drop_ctx(struct sock *ssk)
+void mptcp_subflow_drop_ctx(struct sock *ssk)
{
struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
struct mptcp_options_received mp_opt;
bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
+ struct mptcp_sock *owner;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
if (new_msk)
mptcp_copy_inaddrs(new_msk, child);
- subflow_drop_ctx(child);
+ mptcp_subflow_drop_ctx(child);
goto out;
}
ctx->setsockopt_seq = listener->setsockopt_seq;
if (ctx->mp_capable) {
+ owner = mptcp_sk(new_msk);
+
/* this can't race with mptcp_close(), as the msk is
* not yet exposed to user-space
*/
/* record the newly created socket as the first msk
* subflow, but don't link it yet into conn_list
*/
- WRITE_ONCE(mptcp_sk(new_msk)->first, child);
+ WRITE_ONCE(owner->first, child);
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
- mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
- mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
+ mptcp_pm_new_connection(owner, child, 1);
+ mptcp_token_accept(subflow_req, owner);
ctx->conn = new_msk;
new_msk = NULL;
* uses the correct data
*/
mptcp_copy_inaddrs(ctx->conn, child);
+ mptcp_propagate_sndbuf(ctx->conn, child);
+
+ mptcp_rcv_space_init(owner, child);
+ list_add(&ctx->node, &owner->conn_list);
+ sock_hold(child);
/* with OoO packets we can reach here without ingress
* mpc option
*/
- if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
+ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
mptcp_subflow_fully_established(ctx, &mp_opt);
+ mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
+ ctx->pm_notified = 1;
+ }
} else if (ctx->mp_join) {
- struct mptcp_sock *owner;
-
owner = subflow_req->msk;
if (!owner) {
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
return child;
dispose_child:
- subflow_drop_ctx(child);
+ mptcp_subflow_drop_ctx(child);
tcp_rsk(req)->drop_req = true;
inet_csk_prepare_for_destroy_sock(child);
tcp_done(child);
}
static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
-static struct proto tcp_prot_override;
+static struct proto tcp_prot_override __ro_after_init;
enum mapping_status {
MAPPING_OK,
{
struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ /* bail early if this is a no-op, so that we avoid introducing a
+ * problematic lockdep dependency between TCP accept queue lock
+ * and msk socket spinlock
+ */
+ if (!sk->sk_socket)
+ return;
+
mptcp_data_lock(sk);
if (!sock_owned_by_user(sk))
__mptcp_error_report(sk);
}
}
-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
-{
- struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
- struct mptcp_sock *msk, *next, *head = NULL;
- struct request_sock *req;
-
- /* build a list of all unaccepted mptcp sockets */
- spin_lock_bh(&queue->rskq_lock);
- for (req = queue->rskq_accept_head; req; req = req->dl_next) {
- struct mptcp_subflow_context *subflow;
- struct sock *ssk = req->sk;
- struct mptcp_sock *msk;
-
- if (!sk_is_mptcp(ssk))
- continue;
-
- subflow = mptcp_subflow_ctx(ssk);
- if (!subflow || !subflow->conn)
- continue;
-
- /* skip if already in list */
- msk = mptcp_sk(subflow->conn);
- if (msk->dl_next || msk == head)
- continue;
-
- msk->dl_next = head;
- head = msk;
- }
- spin_unlock_bh(&queue->rskq_lock);
- if (!head)
- return;
-
- /* can't acquire the msk socket lock under the subflow one,
- * or will cause ABBA deadlock
- */
- release_sock(listener_ssk);
-
- for (msk = head; msk; msk = next) {
- struct sock *sk = (struct sock *)msk;
- bool do_cancel_work;
-
- sock_hold(sk);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- next = msk->dl_next;
- msk->first = NULL;
- msk->dl_next = NULL;
-
- do_cancel_work = __mptcp_close(sk, 0);
- release_sock(sk);
- if (do_cancel_work) {
- /* lockdep will report a false positive ABBA deadlock
- * between cancel_work_sync and the listener socket.
- * The involved locks belong to different sockets WRT
- * the existing AB chain.
- * Using a per socket key is problematic as key
- * deregistration requires process context and must be
- * performed at socket disposal time, in atomic
- * context.
- * Just tell lockdep to consider the listener socket
- * released here.
- */
- mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
- mptcp_cancel_work(sk);
- mutex_acquire(&listener_sk->sk_lock.dep_map,
- SINGLE_DEPTH_NESTING, 0, _RET_IP_);
- }
- sock_put(sk);
- }
-
- /* we are still under the listener msk socket lock */
- lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
-}
-
static int subflow_ulp_init(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
* when the subflow is still unaccepted
*/
release = ctx->disposable || list_empty(&ctx->node);
+
+ /* inet_child_forget() does not call sk_state_change(),
+ * explicitly trigger the socket close machinery
+ */
+ if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
+ &mptcp_sk(sk)->flags))
+ mptcp_schedule_work(sk);
sock_put(sk);
}
pdev = to_platform_device(dev->dev.parent);
if (pdev) {
np = pdev->dev.of_node;
- if (np && (of_get_property(np, "mellanox,multi-host", NULL) ||
- of_get_property(np, "mlx,multi-host", NULL)))
+ if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
+ of_property_read_bool(np, "mlx,multi-host")))
ndp->mlx_multi_host = true;
}
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
+ u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
struct nft_masq *priv = nft_expr_priv(expr);
int err;
priv->flags |= NF_NAT_RANGE_MAP_IPS;
}
- plen = sizeof_field(struct nf_nat_range, min_addr.all);
+ plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
unsigned int plen;
int err;
- plen = sizeof_field(struct nf_nat_range, min_addr.all);
+ plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
.name = "redir",
.ops = &nft_redir_inet_ops,
.policy = nft_redir_policy,
- .maxattr = NFTA_MASQ_MAX,
+ .maxattr = NFTA_REDIR_MAX,
.owner = THIS_MODULE,
};
t->tca__pad1 = 0;
t->tca__pad2 = 0;
+ if (extack && extack->_msg &&
+ nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
+ goto out_nlmsg_trim;
+
nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
if (!nest)
goto out_nlmsg_trim;
if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
goto out_nlmsg_trim;
- if (extack && extack->_msg &&
- nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
- goto out_nlmsg_trim;
-
nla_nest_end(skb, nest);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
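/* Editorial note: the extack message is now emitted before the
 * TCA_ACT_TAB nest is opened, i.e. at the root level of the dump, so
 * its attribute id comes from the TCA_ROOT_* namespace; the removed
 * code put a root-level id inside the actions nest, where it clashes
 * with TCA_ACT_* numbering.
 */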
out_nl:
smc_nl_exit();
out_ism:
+ smc_clc_exit();
smc_ism_exit();
out_pernet_subsys_stat:
unregister_pernet_subsys(&smc_net_stat_ops);
union smc_host_cursor cfed;
int rc;
+ if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+ return -ENOBUFS;
+
smc_cdc_add_pending_send(conn, pend);
conn->tx_cdc_seq++;
if (lgr->terminating)
return; /* lgr already terminating */
/* cancel free_work; if already running, it terminates once lgr->freeing is set */
- cancel_delayed_work_sync(&lgr->free_work);
+ cancel_delayed_work(&lgr->free_work);
lgr->terminating = 1;
/* kill remaining link group connections */
err = crypto_ahash_final(req);
if (err)
goto out_free_ahash;
- memcpy(cksumout->data, checksumdata, cksumout->len);
+
+ memcpy(cksumout->data, checksumdata,
+ min_t(int, cksumout->len, crypto_ahash_digestsize(tfm)));
out_free_ahash:
ahash_request_free(req);
buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
buf->len += GSS_KRB5_TOK_HDR_LEN;
- /* Do the HMAC */
- hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+ hmac.len = kctx->gk5e->cksumlength;
hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
/*
if (ret)
goto out_err;
- /* Calculate our hmac over the plaintext data */
- our_hmac_obj.len = sizeof(our_hmac);
+ our_hmac_obj.len = kctx->gk5e->cksumlength;
our_hmac_obj.data = our_hmac;
ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
if (ret)
}
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
- struct sk_buff *skb)
+ u32 len)
{
- if (vvs->rx_bytes + skb->len > vvs->buf_alloc)
+ if (vvs->rx_bytes + len > vvs->buf_alloc)
return false;
- vvs->rx_bytes += skb->len;
+ vvs->rx_bytes += len;
return true;
}
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
- struct sk_buff *skb)
+ u32 len)
{
- int len;
-
- len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len;
vvs->rx_bytes -= len;
vvs->fwd_cnt += len;
}
spin_lock_bh(&vvs->rx_lock);
while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
- skb = __skb_dequeue(&vvs->rx_queue);
+ skb = skb_peek(&vvs->rx_queue);
bytes = len - total;
if (bytes > skb->len)
skb_pull(skb, bytes);
if (skb->len == 0) {
- virtio_transport_dec_rx_pkt(vvs, skb);
+ u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+
+ virtio_transport_dec_rx_pkt(vvs, pkt_len);
+ __skb_unlink(skb, &vvs->rx_queue);
consume_skb(skb);
- } else {
- __skb_queue_head(&vvs->rx_queue, skb);
}
}
while (!msg_ready) {
struct virtio_vsock_hdr *hdr;
+ size_t pkt_len;
skb = __skb_dequeue(&vvs->rx_queue);
if (!skb)
break;
hdr = virtio_vsock_hdr(skb);
+ pkt_len = (size_t)le32_to_cpu(hdr->len);
if (dequeued_len >= 0) {
- size_t pkt_len;
size_t bytes_to_copy;
- pkt_len = (size_t)le32_to_cpu(hdr->len);
bytes_to_copy = min(user_buf_len, pkt_len);
if (bytes_to_copy) {
dequeued_len = err;
} else {
user_buf_len -= bytes_to_copy;
- skb_pull(skb, bytes_to_copy);
}
spin_lock_bh(&vvs->rx_lock);
msg->msg_flags |= MSG_EOR;
}
- virtio_transport_dec_rx_pkt(vvs, skb);
+ virtio_transport_dec_rx_pkt(vvs, pkt_len);
kfree_skb(skb);
}
spin_lock_bh(&vvs->rx_lock);
- can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb);
+ can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
if (!can_enqueue) {
free_pkt = true;
goto out;
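/* Worked example (editorial): if a 4096-byte packet is credited on
 * enqueue and a stream read consumes 1024 bytes, skb_pull() shrinks
 * skb->len to 3072. Debiting by skb->len on final free would then
 * return only 3072 of the 4096 bytes charged, slowly exhausting the
 * peer's credit; accounting by the header length (or the enqueue-time
 * 'len') keeps rx_bytes and fwd_cnt consistent with buf_alloc.
 */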
[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
};
+static struct netlink_range_validation nl80211_punct_bitmap_range = {
+ .min = 0,
+ .max = 0xffff,
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
- [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff),
+ [NL80211_ATTR_PUNCT_BITMAP] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range),
};
/* policy for the key attributes */
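/* Editorial note: the inline bounds of NLA_POLICY_RANGE() are stored in
 * s16 policy fields, so a u32 attribute with an upper bound of 0xffff
 * (> S16_MAX) cannot be expressed that way; NLA_POLICY_FULL_RANGE()
 * with the out-of-line struct netlink_range_validation above is the
 * idiomatic escape hatch. The old NLA_U8 policy could never have
 * accepted the intended 16-bit bitmap values at all.
 */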
struct cfg80211_chan_def *chandef;
chandef = wdev_chandef(wdev, link_id);
- if (!chandef)
+ if (!chandef || !chandef->chan)
continue;
/*
static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
const u8 *ssid, int ssid_len,
- struct nlattr **attrs,
- const u8 **bssid_out)
+ struct nlattr **attrs)
{
struct ieee80211_channel *chan;
struct cfg80211_bss *bss;
if (!bss)
return ERR_PTR(-ENOENT);
- *bssid_out = bssid;
return bss;
}
struct net_device *dev = info->user_ptr[1];
struct cfg80211_assoc_request req = {};
struct nlattr **attrs = NULL;
- const u8 *bssid, *ssid;
+ const u8 *ap_addr, *ssid;
unsigned int link_id;
int err, ssid_len;
return -EINVAL;
req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+ ap_addr = req.ap_mld_addr;
attrs = kzalloc(attrsize, GFP_KERNEL);
if (!attrs)
goto free;
}
req.links[link_id].bss =
- nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
- &bssid);
+ nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
if (IS_ERR(req.links[link_id].bss)) {
err = PTR_ERR(req.links[link_id].bss);
req.links[link_id].bss = NULL;
if (req.link_id >= 0)
return -EINVAL;
- req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
- &bssid);
+ req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
if (IS_ERR(req.bss))
return PTR_ERR(req.bss);
+ ap_addr = req.bss->bssid;
}
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
dev->ieee80211_ptr->conn_owner_nlportid =
info->snd_portid;
memcpy(dev->ieee80211_ptr->disconnect_bssid,
- bssid, ETH_ALEN);
+ ap_addr, ETH_ALEN);
}
wdev_unlock(dev->ieee80211_ptr);
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
- u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
- u64 npgs, addr = mr->addr, size = mr->len;
- unsigned int chunks, chunks_rem;
+ u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+ u64 addr = mr->addr, size = mr->len;
+ u32 chunks_rem, npgs_rem;
+ u64 chunks, npgs;
int err;
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
if (npgs > U32_MAX)
return -EINVAL;
- chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
- if (chunks == 0)
+ chunks = div_u64_rem(size, chunk_size, &chunks_rem);
+ if (!chunks || chunks > U32_MAX)
return -EINVAL;
if (!unaligned_chunks && chunks_rem)
umem->headroom = headroom;
umem->chunk_size = chunk_size;
umem->chunks = chunks;
- umem->npgs = (u32)npgs;
+ umem->npgs = npgs;
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
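/* Worked example (editorial): with the minimum chunk_size of 2048, a
 * umem larger than U32_MAX * 2048 bytes overflows a 32-bit 'chunks',
 * so the old unsigned-int cast could truncate to a small but nonzero,
 * seemingly valid chunk count. Doing the division in u64 and rejecting
 * chunks > U32_MAX keeps the subsequent u32 fields trustworthy.
 */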
goto error;
}
- if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
- NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
- goto error;
- }
-
x->inner_mode = *inner_mode;
if (x->props.family == AF_INET)
return -EMSGSIZE;
ap = nla_data(nla);
- memcpy(ap, aead, sizeof(*aead));
+ strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
+ ap->alg_key_len = aead->alg_key_len;
+ ap->alg_icv_len = aead->alg_icv_len;
if (redact_secret && aead->alg_key_len)
memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
return -EMSGSIZE;
ap = nla_data(nla);
- memcpy(ap, ealg, sizeof(*ealg));
+ strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
+ ap->alg_key_len = ealg->alg_key_len;
if (redact_secret && ealg->alg_key_len)
memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
return 0;
}
+static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
+{
+ struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
+ struct xfrm_algo *ap;
+
+ if (!nla)
+ return -EMSGSIZE;
+
+ ap = nla_data(nla);
+ strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
+ ap->alg_key_len = 0;
+
+ return 0;
+}
+
+static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
+{
+ struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
+ struct xfrm_encap_tmpl *uep;
+
+ if (!nla)
+ return -EMSGSIZE;
+
+ uep = nla_data(nla);
+ memset(uep, 0, sizeof(*uep));
+
+ uep->encap_type = ep->encap_type;
+ uep->encap_sport = ep->encap_sport;
+ uep->encap_dport = ep->encap_dport;
+ uep->encap_oa = ep->encap_oa;
+
+ return 0;
+}
+
static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
{
int ret = 0;
goto out;
}
if (x->calg) {
- ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+ ret = copy_to_user_calg(x->calg, skb);
if (ret)
goto out;
}
if (x->encap) {
- ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+ ret = copy_to_user_encap(x->encap, skb);
if (ret)
goto out;
}
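/* Editorial note: the replaced nla_put()/memcpy() calls copied entire
 * heap-allocated structs into the dump, including uninitialized bytes
 * after the NUL terminator in alg_name (and struct padding in the encap
 * template). Rebuilding each attribute field by field with
 * strscpy_pad(), plus memset() for the template, ensures only intended,
 * zero-padded data reaches user space.
 */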
/generate_rust_target
/insert-sys-cert
/kallsyms
-/list-gitignored
/module.lds
/recordmcount
/sign-file
endif
# The following programs are only built on demand
-hostprogs += list-gitignored unifdef
+hostprogs += unifdef
# The module linker script is preprocessed on demand
targets += module.lds
# Makefile for the different targets used to generate full packages of a kernel
include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
KBUILD_PKG_ROOTCMD ?="fakeroot -u"
tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
--transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
-# .tmp_filelist .tmp_filelist_exclude
+# tarball compression
# ---------------------------------------------------------------------------
-scripts/list-gitignored: FORCE
- $(Q)$(MAKE) -f $(srctree)/Makefile scripts_package
+%.tar.gz: %.tar
+ $(call cmd,gzip)
-# 1f5d3a6b6532e25a5cdf1f311956b2b03d343a48 removed '*.rej' from .gitignore,
-# but it is definitely a generated file.
-filechk_filelist = \
- $< --exclude='*.rej' --output=$@_exclude --prefix=./ --rootdir=$(srctree) --stat=-
+%.tar.bz2: %.tar
+ $(call cmd,bzip2)
-.tmp_filelist: scripts/list-gitignored FORCE
- $(call filechk,filelist)
+%.tar.xz: %.tar
+ $(call cmd,xzmisc)
-# tarball
-# ---------------------------------------------------------------------------
-
-quiet_cmd_tar = TAR $@
- cmd_tar = tar -c -f $@ $(tar-compress-opt) $(tar-exclude-opt) \
- --owner=0 --group=0 --sort=name \
- --transform 's:^\.:$*:S' -C $(tar-rootdir) .
-
-tar-rootdir := $(srctree)
+%.tar.zst: %.tar
+ $(call cmd,zstd)
-%.tar:
- $(call cmd,tar)
-
-%.tar.gz: private tar-compress-opt := -I $(KGZIP)
-%.tar.gz:
- $(call cmd,tar)
+# Git
+# ---------------------------------------------------------------------------
-%.tar.bz2: private tar-compress-opt := -I $(KBZIP2)
-%.tar.bz2:
- $(call cmd,tar)
+filechk_HEAD = git -C $(srctree) rev-parse --verify HEAD 2>/dev/null
-%.tar.xz: private tar-compress-opt := -I $(XZ)
-%.tar.xz:
- $(call cmd,tar)
+.tmp_HEAD: check-git FORCE
+ $(call filechk,HEAD)
-%.tar.zst: private tar-compress-opt := -I $(ZSTD)
-%.tar.zst:
- $(call cmd,tar)
+PHONY += check-git
+check-git:
+ @if ! $(srctree)/scripts/check-git; then \
+ echo >&2 "error: creating source package requires git repository"; \
+ false; \
+ fi
# Linux source tarball
# ---------------------------------------------------------------------------
-linux.tar.gz: tar-exclude-opt = --exclude=./$@ --exclude-from=$<_exclude
-linux.tar.gz: .tmp_filelist
+quiet_cmd_archive_linux = ARCHIVE $@
+ cmd_archive_linux = \
+ git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ $$(cat $<)
+
+targets += linux.tar
+linux.tar: .tmp_HEAD FORCE
+ $(call if_changed,archive_linux)
# rpm-pkg
# ---------------------------------------------------------------------------
srcrpm-pkg: linux.tar.gz
$(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
+rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -bs kernel.spec \
- --define='_smp_mflags %{nil}' --define='_sourcedir .' --define='_srcrpmdir .'
+ --define='_smp_mflags %{nil}' --define='_sourcedir rpmbuild/SOURCES' --define='_srcrpmdir .'
# binrpm-pkg
# ---------------------------------------------------------------------------
# dir-pkg tar*-pkg - tarball targets
# ---------------------------------------------------------------------------
-tar-pkg-tarball = linux-$(KERNELRELEASE)-$(ARCH).$(1)
-tar-pkg-phony = $(subst .,,$(1))-pkg
-
tar-install: FORCE
$(Q)$(MAKE) -f $(srctree)/Makefile
+$(Q)$(srctree)/scripts/package/buildtar $@
+quiet_cmd_tar = TAR $@
+ cmd_tar = cd $<; tar cf ../$@ --owner=root --group=root --sort=name *
+
+linux-$(KERNELRELEASE)-$(ARCH).tar: tar-install
+ $(call cmd,tar)
+
PHONY += dir-pkg
dir-pkg: tar-install
@echo "Kernel tree successfully created in $<"
-define tar-pkg-rule
-PHONY += $(tar-pkg-phony)
-$(tar-pkg-phony): $(tar-pkg-tarball)
+PHONY += tar-pkg
+tar-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar
@:
-$(tar-pkg-tarball): private tar-rootdir := tar-install
-$(tar-pkg-tarball): tar-install
-endef
-
-$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call tar-pkg-rule,$(x))))
+tar%-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar.% FORCE
+ @:
# perf-tar*-src-pkg - generate a source tarball with perf source
# ---------------------------------------------------------------------------
-perf-tar-src-pkg-tarball = perf-$(KERNELVERSION).$(1)
-perf-tar-src-pkg-phony = perf-$(subst .,,$(1))-src-pkg
-
-quiet_cmd_stage_perf_src = STAGE $@
- cmd_stage_perf_src = \
- rm -rf $@; \
- mkdir -p $@; \
- tar -c -f - --exclude-from=$<_exclude -C $(srctree) --files-from=$(srctree)/tools/perf/MANIFEST | \
- tar -x -f - -C $@
-
-.tmp_perf: .tmp_filelist
- $(call cmd,stage_perf_src)
-
-filechk_perf_head = \
- if test -z "$(git -C $(srctree) rev-parse --show-cdup 2>/dev/null)" && \
- head=$$(git -C $(srctree) rev-parse --verify HEAD 2>/dev/null); then \
- echo $$head; \
- else \
- echo "not a git tree"; \
- fi
+.tmp_perf:
+ $(Q)mkdir .tmp_perf
-.tmp_perf/HEAD: .tmp_perf FORCE
- $(call filechk,perf_head)
+.tmp_perf/HEAD: .tmp_HEAD | .tmp_perf
+ $(call cmd,copy)
quiet_cmd_perf_version_file = GEN $@
cmd_perf_version_file = cd $(srctree)/tools/perf; util/PERF-VERSION-GEN $(dir $(abspath $@))
-# PERF-VERSION-FILE and HEAD are independent, but this avoids updating the
+# PERF-VERSION-FILE and .tmp_HEAD are independent, but this avoids updating the
# timestamp of PERF-VERSION-FILE.
# The best fix would be in tools/perf/util/PERF-VERSION-GEN.
-.tmp_perf/PERF-VERSION-FILE: .tmp_perf/HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN
+.tmp_perf/PERF-VERSION-FILE: .tmp_HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN | .tmp_perf
$(call cmd,perf_version_file)
-define perf-tar-src-pkg-rule
-PHONY += $(perf-tar-src-pkg-phony)
-$(perf-tar-src-pkg-phony): $(perf-tar-src-pkg-tarball)
- @:
+quiet_cmd_archive_perf = ARCHIVE $@
+ cmd_archive_perf = \
+ git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ \
+ --add-file=$$(realpath $(word 2, $^)) \
+ --add-file=$$(realpath $(word 3, $^)) \
+ $$(cat $(word 2, $^))^{tree} $$(cat $<)
-$(perf-tar-src-pkg-tarball): private tar-rootdir := .tmp_perf
-$(perf-tar-src-pkg-tarball): .tmp_filelist .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE
-endef
+targets += perf-$(KERNELVERSION).tar
+perf-$(KERNELVERSION).tar: tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE
+ $(call if_changed,archive_perf)
+
+PHONY += perf-tar-src-pkg
+perf-tar-src-pkg: perf-$(KERNELVERSION).tar
+ @:
-$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call perf-tar-src-pkg-rule,$(x))))
+perf-tar%-src-pkg: perf-$(KERNELVERSION).tar.% FORCE
+ @:
# Help text displayed when executing 'make help'
# ---------------------------------------------------------------------------
PHONY += FORCE
FORCE:
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist, we will rebuild anyway in that case.
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
.PHONY: $(PHONY)
--- /dev/null
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# succeed if we are in a git repository
+
+srctree="$(dirname $0)/.."
+
+if ! git -C "${srctree}" rev-parse --verify HEAD >/dev/null 2>/dev/null; then
+ exit 1
+fi
+
+if ! test -z $(git -C "${srctree}" rev-parse --show-cdup 2>/dev/null); then
+ exit 1
+fi
#define __IGNORE_truncate
#define __IGNORE_stat
#define __IGNORE_lstat
-#define __IGNORE_fstat
#define __IGNORE_fcntl
#define __IGNORE_fadvise64
#define __IGNORE_newfstatat
/* 64-bit ports never needed these, and new 32-bit ports can use statx */
#define __IGNORE_fstat64
#define __IGNORE_fstatat64
+
+/* Newer ports are not required to provide fstat in favor of statx */
+#define __IGNORE_fstat
EOF
}
"kallsyms_markers",
"kallsyms_token_table",
"kallsyms_token_index",
+ "kallsyms_seqs_of_names",
/* Exclude linker generated symbols which vary between passes */
"_SDA_BASE_", /* ppc */
"_SDA2_BASE_", /* ppc */
void conf_set_changed(bool val)
{
- if (conf_changed_callback && conf_changed != val)
- conf_changed_callback();
+ bool changed = conf_changed != val;
conf_changed = val;
+
+ if (conf_changed_callback && changed)
+ conf_changed_callback();
}
bool conf_get_changed(void)
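/* Editorial note: front ends use the callback to refresh their
 * "modified" indication via conf_get_changed(); invoking it before
 * conf_changed was updated let it observe the stale value. The
 * reordered version latches whether the flag actually changed, stores
 * the new value, and only then notifies.
 */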
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-only
-//
-// Traverse the source tree, parsing all .gitignore files, and print file paths
-// that are ignored by git.
-// The output is suitable to the --exclude-from option of tar.
-// This is useful until the --exclude-vcs-ignores option gets working correctly.
-//
-// Copyright (C) 2023 Masahiro Yamada <masahiroy@kernel.org>
-// (a lot of code imported from GIT)
-
-#include <assert.h>
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-// Imported from commit 23c56f7bd5f1667f8b793d796bf30e39545920f6 in GIT
-//
-//---------------------------(IMPORT FROM GIT BEGIN)---------------------------
-
-// Copied from environment.c
-
-static bool ignore_case;
-
-// Copied from git-compat-util.h
-
-/* Sane ctype - no locale, and works with signed chars */
-#undef isascii
-#undef isspace
-#undef isdigit
-#undef isalpha
-#undef isalnum
-#undef isprint
-#undef islower
-#undef isupper
-#undef tolower
-#undef toupper
-#undef iscntrl
-#undef ispunct
-#undef isxdigit
-
-static const unsigned char sane_ctype[256];
-#define GIT_SPACE 0x01
-#define GIT_DIGIT 0x02
-#define GIT_ALPHA 0x04
-#define GIT_GLOB_SPECIAL 0x08
-#define GIT_REGEX_SPECIAL 0x10
-#define GIT_PATHSPEC_MAGIC 0x20
-#define GIT_CNTRL 0x40
-#define GIT_PUNCT 0x80
-#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
-#define isascii(x) (((x) & ~0x7f) == 0)
-#define isspace(x) sane_istest(x,GIT_SPACE)
-#define isdigit(x) sane_istest(x,GIT_DIGIT)
-#define isalpha(x) sane_istest(x,GIT_ALPHA)
-#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
-#define isprint(x) ((x) >= 0x20 && (x) <= 0x7e)
-#define islower(x) sane_iscase(x, 1)
-#define isupper(x) sane_iscase(x, 0)
-#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL)
-#define iscntrl(x) (sane_istest(x,GIT_CNTRL))
-#define ispunct(x) sane_istest(x, GIT_PUNCT | GIT_REGEX_SPECIAL | \
- GIT_GLOB_SPECIAL | GIT_PATHSPEC_MAGIC)
-#define isxdigit(x) (hexval_table[(unsigned char)(x)] != -1)
-#define tolower(x) sane_case((unsigned char)(x), 0x20)
-#define toupper(x) sane_case((unsigned char)(x), 0)
-
-static inline int sane_case(int x, int high)
-{
- if (sane_istest(x, GIT_ALPHA))
- x = (x & ~0x20) | high;
- return x;
-}
-
-static inline int sane_iscase(int x, int is_lower)
-{
- if (!sane_istest(x, GIT_ALPHA))
- return 0;
-
- if (is_lower)
- return (x & 0x20) != 0;
- else
- return (x & 0x20) == 0;
-}
-
-// Copied from ctype.c
-
-enum {
- S = GIT_SPACE,
- A = GIT_ALPHA,
- D = GIT_DIGIT,
- G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */
- R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | */
- P = GIT_PATHSPEC_MAGIC, /* other non-alnum, except for ] and } */
- X = GIT_CNTRL,
- U = GIT_PUNCT,
- Z = GIT_CNTRL | GIT_SPACE
-};
-
-static const unsigned char sane_ctype[256] = {
- X, X, X, X, X, X, X, X, X, Z, Z, X, X, Z, X, X, /* 0.. 15 */
- X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 16.. 31 */
- S, P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */
- D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */
- P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
- A, A, A, A, A, A, A, A, A, A, A, G, G, U, R, P, /* 80.. 95 */
- P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
- A, A, A, A, A, A, A, A, A, A, A, R, R, U, P, X, /* 112..127 */
- /* Nothing in the 128.. range */
-};
-
-// Copied from hex.c
-
-static const signed char hexval_table[256] = {
- -1, -1, -1, -1, -1, -1, -1, -1, /* 00-07 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 08-0f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 10-17 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 18-1f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 20-27 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 28-2f */
- 0, 1, 2, 3, 4, 5, 6, 7, /* 30-37 */
- 8, 9, -1, -1, -1, -1, -1, -1, /* 38-3f */
- -1, 10, 11, 12, 13, 14, 15, -1, /* 40-47 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 48-4f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 50-57 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 58-5f */
- -1, 10, 11, 12, 13, 14, 15, -1, /* 60-67 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 68-67 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 70-77 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 78-7f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 80-87 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 88-8f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 90-97 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 98-9f */
- -1, -1, -1, -1, -1, -1, -1, -1, /* a0-a7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* a8-af */
- -1, -1, -1, -1, -1, -1, -1, -1, /* b0-b7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* b8-bf */
- -1, -1, -1, -1, -1, -1, -1, -1, /* c0-c7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* c8-cf */
- -1, -1, -1, -1, -1, -1, -1, -1, /* d0-d7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* d8-df */
- -1, -1, -1, -1, -1, -1, -1, -1, /* e0-e7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* e8-ef */
- -1, -1, -1, -1, -1, -1, -1, -1, /* f0-f7 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* f8-ff */
-};
-
-// Copied from wildmatch.h
-
-#define WM_CASEFOLD 1
-#define WM_PATHNAME 2
-
-#define WM_NOMATCH 1
-#define WM_MATCH 0
-#define WM_ABORT_ALL -1
-#define WM_ABORT_TO_STARSTAR -2
-
-// Copied from wildmatch.c
-
-typedef unsigned char uchar;
-
-// local modification: remove NEGATE_CLASS(2)
-
-#define CC_EQ(class, len, litmatch) ((len) == sizeof (litmatch)-1 \
- && *(class) == *(litmatch) \
- && strncmp((char*)class, litmatch, len) == 0)
-
-// local modification: simpilify macros
-#define ISBLANK(c) ((c) == ' ' || (c) == '\t')
-#define ISGRAPH(c) (isprint(c) && !isspace(c))
-#define ISPRINT(c) isprint(c)
-#define ISDIGIT(c) isdigit(c)
-#define ISALNUM(c) isalnum(c)
-#define ISALPHA(c) isalpha(c)
-#define ISCNTRL(c) iscntrl(c)
-#define ISLOWER(c) islower(c)
-#define ISPUNCT(c) ispunct(c)
-#define ISSPACE(c) isspace(c)
-#define ISUPPER(c) isupper(c)
-#define ISXDIGIT(c) isxdigit(c)
-
-/* Match pattern "p" against "text" */
-static int dowild(const uchar *p, const uchar *text, unsigned int flags)
-{
- uchar p_ch;
- const uchar *pattern = p;
-
- for ( ; (p_ch = *p) != '\0'; text++, p++) {
- int matched, match_slash, negated;
- uchar t_ch, prev_ch;
- if ((t_ch = *text) == '\0' && p_ch != '*')
- return WM_ABORT_ALL;
- if ((flags & WM_CASEFOLD) && ISUPPER(t_ch))
- t_ch = tolower(t_ch);
- if ((flags & WM_CASEFOLD) && ISUPPER(p_ch))
- p_ch = tolower(p_ch);
- switch (p_ch) {
- case '\\':
- /* Literal match with following character. Note that the test
- * in "default" handles the p[1] == '\0' failure case. */
- p_ch = *++p;
- /* FALLTHROUGH */
- default:
- if (t_ch != p_ch)
- return WM_NOMATCH;
- continue;
- case '?':
- /* Match anything but '/'. */
- if ((flags & WM_PATHNAME) && t_ch == '/')
- return WM_NOMATCH;
- continue;
- case '*':
- if (*++p == '*') {
- const uchar *prev_p = p - 2;
- while (*++p == '*') {}
- if (!(flags & WM_PATHNAME))
- /* without WM_PATHNAME, '*' == '**' */
- match_slash = 1;
- else if ((prev_p < pattern || *prev_p == '/') &&
- (*p == '\0' || *p == '/' ||
- (p[0] == '\\' && p[1] == '/'))) {
- /*
- * Assuming we already match 'foo/' and are at
- * <star star slash>, just assume it matches
- * nothing and go ahead match the rest of the
- * pattern with the remaining string. This
- * helps make foo/<*><*>/bar (<> because
- * otherwise it breaks C comment syntax) match
- * both foo/bar and foo/a/bar.
- */
- if (p[0] == '/' &&
- dowild(p + 1, text, flags) == WM_MATCH)
- return WM_MATCH;
- match_slash = 1;
- } else /* WM_PATHNAME is set */
- match_slash = 0;
- } else
- /* without WM_PATHNAME, '*' == '**' */
- match_slash = flags & WM_PATHNAME ? 0 : 1;
- if (*p == '\0') {
- /* Trailing "**" matches everything. Trailing "*" matches
- * only if there are no more slash characters. */
- if (!match_slash) {
- if (strchr((char *)text, '/'))
- return WM_NOMATCH;
- }
- return WM_MATCH;
- } else if (!match_slash && *p == '/') {
- /*
- * _one_ asterisk followed by a slash
- * with WM_PATHNAME matches the next
- * directory
- */
- const char *slash = strchr((char*)text, '/');
- if (!slash)
- return WM_NOMATCH;
- text = (const uchar*)slash;
- /* the slash is consumed by the top-level for loop */
- break;
- }
- while (1) {
- if (t_ch == '\0')
- break;
- /*
- * Try to advance faster when an asterisk is
- * followed by a literal. We know in this case
- * that the string before the literal
- * must belong to "*".
- * If match_slash is false, do not look past
- * the first slash as it cannot belong to '*'.
- */
- if (!is_glob_special(*p)) {
- p_ch = *p;
- if ((flags & WM_CASEFOLD) && ISUPPER(p_ch))
- p_ch = tolower(p_ch);
- while ((t_ch = *text) != '\0' &&
- (match_slash || t_ch != '/')) {
- if ((flags & WM_CASEFOLD) && ISUPPER(t_ch))
- t_ch = tolower(t_ch);
- if (t_ch == p_ch)
- break;
- text++;
- }
- if (t_ch != p_ch)
- return WM_NOMATCH;
- }
- if ((matched = dowild(p, text, flags)) != WM_NOMATCH) {
- if (!match_slash || matched != WM_ABORT_TO_STARSTAR)
- return matched;
- } else if (!match_slash && t_ch == '/')
- return WM_ABORT_TO_STARSTAR;
- t_ch = *++text;
- }
- return WM_ABORT_ALL;
- case '[':
- p_ch = *++p;
- if (p_ch == '^')
- p_ch = '!';
- /* Assign literal 1/0 because of "matched" comparison. */
- negated = p_ch == '!' ? 1 : 0;
- if (negated) {
- /* Inverted character class. */
- p_ch = *++p;
- }
- prev_ch = 0;
- matched = 0;
- do {
- if (!p_ch)
- return WM_ABORT_ALL;
- if (p_ch == '\\') {
- p_ch = *++p;
- if (!p_ch)
- return WM_ABORT_ALL;
- if (t_ch == p_ch)
- matched = 1;
- } else if (p_ch == '-' && prev_ch && p[1] && p[1] != ']') {
- p_ch = *++p;
- if (p_ch == '\\') {
- p_ch = *++p;
- if (!p_ch)
- return WM_ABORT_ALL;
- }
- if (t_ch <= p_ch && t_ch >= prev_ch)
- matched = 1;
- else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch)) {
- uchar t_ch_upper = toupper(t_ch);
- if (t_ch_upper <= p_ch && t_ch_upper >= prev_ch)
- matched = 1;
- }
- p_ch = 0; /* This makes "prev_ch" get set to 0. */
- } else if (p_ch == '[' && p[1] == ':') {
- const uchar *s;
- int i;
- for (s = p += 2; (p_ch = *p) && p_ch != ']'; p++) {} /*SHARED ITERATOR*/
- if (!p_ch)
- return WM_ABORT_ALL;
- i = p - s - 1;
- if (i < 0 || p[-1] != ':') {
- /* Didn't find ":]", so treat like a normal set. */
- p = s - 2;
- p_ch = '[';
- if (t_ch == p_ch)
- matched = 1;
- continue;
- }
- if (CC_EQ(s,i, "alnum")) {
- if (ISALNUM(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "alpha")) {
- if (ISALPHA(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "blank")) {
- if (ISBLANK(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "cntrl")) {
- if (ISCNTRL(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "digit")) {
- if (ISDIGIT(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "graph")) {
- if (ISGRAPH(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "lower")) {
- if (ISLOWER(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "print")) {
- if (ISPRINT(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "punct")) {
- if (ISPUNCT(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "space")) {
- if (ISSPACE(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "upper")) {
- if (ISUPPER(t_ch))
- matched = 1;
- else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch))
- matched = 1;
- } else if (CC_EQ(s,i, "xdigit")) {
- if (ISXDIGIT(t_ch))
- matched = 1;
- } else /* malformed [:class:] string */
- return WM_ABORT_ALL;
- p_ch = 0; /* This makes "prev_ch" get set to 0. */
- } else if (t_ch == p_ch)
- matched = 1;
- } while (prev_ch = p_ch, (p_ch = *++p) != ']');
- if (matched == negated ||
- ((flags & WM_PATHNAME) && t_ch == '/'))
- return WM_NOMATCH;
- continue;
- }
- }
-
- return *text ? WM_NOMATCH : WM_MATCH;
-}
-
-/* Match the "pattern" against the "text" string. */
-static int wildmatch(const char *pattern, const char *text, unsigned int flags)
-{
- // local modification: move WM_CASEFOLD here
- if (ignore_case)
- flags |= WM_CASEFOLD;
-
- return dowild((const uchar*)pattern, (const uchar*)text, flags);
-}
-
-// Copied from dir.h
-
-#define PATTERN_FLAG_NODIR 1
-#define PATTERN_FLAG_ENDSWITH 4
-#define PATTERN_FLAG_MUSTBEDIR 8
-#define PATTERN_FLAG_NEGATIVE 16
-
-// Copied from dir.c
-
-static int fspathncmp(const char *a, const char *b, size_t count)
-{
- return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count);
-}
-
-static int simple_length(const char *match)
-{
- int len = -1;
-
- for (;;) {
- unsigned char c = *match++;
- len++;
- if (c == '\0' || is_glob_special(c))
- return len;
- }
-}
-
-static int no_wildcard(const char *string)
-{
- return string[simple_length(string)] == '\0';
-}
-
-static void parse_path_pattern(const char **pattern,
- int *patternlen,
- unsigned *flags,
- int *nowildcardlen)
-{
- const char *p = *pattern;
- size_t i, len;
-
- *flags = 0;
- if (*p == '!') {
- *flags |= PATTERN_FLAG_NEGATIVE;
- p++;
- }
- len = strlen(p);
- if (len && p[len - 1] == '/') {
- len--;
- *flags |= PATTERN_FLAG_MUSTBEDIR;
- }
- for (i = 0; i < len; i++) {
- if (p[i] == '/')
- break;
- }
- if (i == len)
- *flags |= PATTERN_FLAG_NODIR;
- *nowildcardlen = simple_length(p);
- /*
- * we should have excluded the trailing slash from 'p' too,
- * but that's one more allocation. Instead just make sure
- * nowildcardlen does not exceed real patternlen
- */
- if (*nowildcardlen > len)
- *nowildcardlen = len;
- if (*p == '*' && no_wildcard(p + 1))
- *flags |= PATTERN_FLAG_ENDSWITH;
- *pattern = p;
- *patternlen = len;
-}
-
-static void trim_trailing_spaces(char *buf)
-{
- char *p, *last_space = NULL;
-
- for (p = buf; *p; p++)
- switch (*p) {
- case ' ':
- if (!last_space)
- last_space = p;
- break;
- case '\\':
- p++;
- if (!*p)
- return;
- /* fallthrough */
- default:
- last_space = NULL;
- }
-
- if (last_space)
- *last_space = '\0';
-}
-
-static int match_basename(const char *basename, int basenamelen,
- const char *pattern, int prefix, int patternlen,
- unsigned flags)
-{
- if (prefix == patternlen) {
- if (patternlen == basenamelen &&
- !fspathncmp(pattern, basename, basenamelen))
- return 1;
- } else if (flags & PATTERN_FLAG_ENDSWITH) {
- /* "*literal" matching against "fooliteral" */
- if (patternlen - 1 <= basenamelen &&
- !fspathncmp(pattern + 1,
- basename + basenamelen - (patternlen - 1),
- patternlen - 1))
- return 1;
- } else {
- // local modification: call wildmatch() directly
- if (!wildmatch(pattern, basename, flags))
- return 1;
- }
- return 0;
-}
-
-static int match_pathname(const char *pathname, int pathlen,
- const char *base, int baselen,
- const char *pattern, int prefix, int patternlen)
-{
- // local modification: remove local variables
-
- /*
- * match with FNM_PATHNAME; the pattern has base implicitly
- * in front of it.
- */
- if (*pattern == '/') {
- pattern++;
- patternlen--;
- prefix--;
- }
-
- /*
- * baselen does not count the trailing slash. base[] may or
- * may not end with a trailing slash though.
- */
- if (pathlen < baselen + 1 ||
- (baselen && pathname[baselen] != '/') ||
- fspathncmp(pathname, base, baselen))
- return 0;
-
- // local modification: simplified because always baselen > 0
- pathname += baselen + 1;
- pathlen -= baselen + 1;
-
- if (prefix) {
- /*
- * if the non-wildcard part is longer than the
- * remaining pathname, surely it cannot match.
- */
- if (prefix > pathlen)
- return 0;
-
- if (fspathncmp(pattern, pathname, prefix))
- return 0;
- pattern += prefix;
- patternlen -= prefix;
- pathname += prefix;
- pathlen -= prefix;
-
- /*
- * If the whole pattern did not have a wildcard,
- * then our prefix match is all we need; we
- * do not need to call fnmatch at all.
- */
- if (!patternlen && !pathlen)
- return 1;
- }
-
- // local modification: call wildmatch() directly
- return !wildmatch(pattern, pathname, WM_PATHNAME);
-}
-
-// Copied from git/utf8.c
-
-static const char utf8_bom[] = "\357\273\277";
-
-//----------------------------(IMPORT FROM GIT END)----------------------------
-
-struct pattern {
- unsigned int flags;
- int nowildcardlen;
- int patternlen;
- int dirlen;
- char pattern[];
-};
-
-static struct pattern **pattern_list;
-static int nr_patterns, alloced_patterns;
-
-// Remember the number of patterns at each directory level
-static int *nr_patterns_at;
-// Track the current/max directory level;
-static int depth, max_depth;
-static bool debug_on;
-static FILE *out_fp, *stat_fp;
-static char *prefix = "";
-static char *progname;
-
-static void __attribute__((noreturn)) perror_exit(const char *s)
-{
- perror(s);
-
- exit(EXIT_FAILURE);
-}
-
-static void __attribute__((noreturn)) error_exit(const char *fmt, ...)
-{
- va_list args;
-
- fprintf(stderr, "%s: error: ", progname);
-
- va_start(args, fmt);
- vfprintf(stderr, fmt, args);
- va_end(args);
-
- exit(EXIT_FAILURE);
-}
-
-static void debug(const char *fmt, ...)
-{
- va_list args;
- int i;
-
- if (!debug_on)
- return;
-
- fprintf(stderr, "[DEBUG] ");
-
- for (i = 0; i < depth * 2; i++)
- fputc(' ', stderr);
-
- va_start(args, fmt);
- vfprintf(stderr, fmt, args);
- va_end(args);
-}
-
-static void *xrealloc(void *ptr, size_t size)
-{
- ptr = realloc(ptr, size);
- if (!ptr)
- perror_exit(progname);
-
- return ptr;
-}
-
-static void *xmalloc(size_t size)
-{
- return xrealloc(NULL, size);
-}
-
-// similar to last_matching_pattern_from_list() in GIT
-static bool is_ignored(const char *path, int pathlen, int dirlen, bool is_dir)
-{
- int i;
-
- // Search in the reverse order because the last matching pattern wins.
- for (i = nr_patterns - 1; i >= 0; i--) {
- struct pattern *p = pattern_list[i];
- unsigned int flags = p->flags;
- const char *gitignore_dir = p->pattern + p->patternlen + 1;
- bool ignored;
-
- if ((flags & PATTERN_FLAG_MUSTBEDIR) && !is_dir)
- continue;
-
- if (flags & PATTERN_FLAG_NODIR) {
- if (!match_basename(path + dirlen + 1,
- pathlen - dirlen - 1,
- p->pattern,
- p->nowildcardlen,
- p->patternlen,
- p->flags))
- continue;
- } else {
- if (!match_pathname(path, pathlen,
- gitignore_dir, p->dirlen,
- p->pattern,
- p->nowildcardlen,
- p->patternlen))
- continue;
- }
-
- debug("%s: matches %s%s%s (%s/.gitignore)\n", path,
- flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern,
- flags & PATTERN_FLAG_MUSTBEDIR ? "/" : "",
- gitignore_dir);
-
- ignored = (flags & PATTERN_FLAG_NEGATIVE) == 0;
- if (ignored)
- debug("Ignore: %s\n", path);
-
- return ignored;
- }
-
- debug("%s: no match\n", path);
-
- return false;
-}
-
-static void add_pattern(const char *string, const char *dir, int dirlen)
-{
- struct pattern *p;
- int patternlen, nowildcardlen;
- unsigned int flags;
-
- parse_path_pattern(&string, &patternlen, &flags, &nowildcardlen);
-
- if (patternlen == 0)
- return;
-
- p = xmalloc(sizeof(*p) + patternlen + dirlen + 2);
-
- memcpy(p->pattern, string, patternlen);
- p->pattern[patternlen] = 0;
- memcpy(p->pattern + patternlen + 1, dir, dirlen);
- p->pattern[patternlen + 1 + dirlen] = 0;
-
- p->patternlen = patternlen;
- p->nowildcardlen = nowildcardlen;
- p->dirlen = dirlen;
- p->flags = flags;
-
- debug("Add pattern: %s%s%s\n",
- flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern,
- flags & PATTERN_FLAG_MUSTBEDIR ? "/" : "");
-
- if (nr_patterns >= alloced_patterns) {
- alloced_patterns += 128;
- pattern_list = xrealloc(pattern_list,
- sizeof(*pattern_list) * alloced_patterns);
- }
-
- pattern_list[nr_patterns++] = p;
-}
-
-// similar to add_patterns_from_buffer() in GIT
-static void add_patterns_from_gitignore(const char *dir, int dirlen)
-{
- struct stat st;
- char path[PATH_MAX], *buf, *entry;
- size_t size;
- int fd, pathlen, i;
-
- pathlen = snprintf(path, sizeof(path), "%s/.gitignore", dir);
- if (pathlen >= sizeof(path))
- error_exit("%s: too long path was truncated\n", path);
-
- fd = open(path, O_RDONLY | O_NOFOLLOW);
- if (fd < 0) {
- if (errno != ENOENT)
- return perror_exit(path);
- return;
- }
-
- if (fstat(fd, &st) < 0)
- perror_exit(path);
-
- size = st.st_size;
-
- buf = xmalloc(size + 1);
- if (read(fd, buf, st.st_size) != st.st_size)
- perror_exit(path);
-
- buf[st.st_size] = '\n';
- if (close(fd))
- perror_exit(path);
-
- debug("Parse %s\n", path);
-
- entry = buf;
-
- // skip utf8 bom
- if (!strncmp(entry, utf8_bom, strlen(utf8_bom)))
- entry += strlen(utf8_bom);
-
- for (i = entry - buf; i < size; i++) {
- if (buf[i] == '\n') {
- if (entry != buf + i && entry[0] != '#') {
- buf[i - (i && buf[i-1] == '\r')] = 0;
- trim_trailing_spaces(entry);
- add_pattern(entry, dir, dirlen);
- }
- entry = buf + i + 1;
- }
- }
-
- free(buf);
-}
-
-// Save the current number of patterns and increment the depth
-static void increment_depth(void)
-{
- if (depth >= max_depth) {
- max_depth += 1;
- nr_patterns_at = xrealloc(nr_patterns_at,
- sizeof(*nr_patterns_at) * max_depth);
- }
-
- nr_patterns_at[depth] = nr_patterns;
- depth++;
-}
-
-// Decrement the depth, and free up the patterns of this directory level.
-static void decrement_depth(void)
-{
- depth--;
- assert(depth >= 0);
-
- while (nr_patterns > nr_patterns_at[depth])
- free(pattern_list[--nr_patterns]);
-}
-
-static void print_path(const char *path)
-{
- // The path always starts with "./"
- assert(strlen(path) >= 2);
-
- // Replace the root directory with a preferred prefix.
- // This is useful for the tar command.
- fprintf(out_fp, "%s%s\n", prefix, path + 2);
-}
-
-static void print_stat(const char *path, struct stat *st)
-{
- if (!stat_fp)
- return;
-
- if (!S_ISREG(st->st_mode) && !S_ISLNK(st->st_mode))
- return;
-
- assert(strlen(path) >= 2);
-
- fprintf(stat_fp, "%c %9ld %10ld %s\n",
- S_ISLNK(st->st_mode) ? 'l' : '-',
- st->st_size, st->st_mtim.tv_sec, path + 2);
-}
-
-// Traverse the entire directory tree, parsing .gitignore files.
-// Print file paths that are not tracked by git.
-//
-// Return true if all files under the directory are ignored, false otherwise.
-static bool traverse_directory(const char *dir, int dirlen)
-{
- bool all_ignored = true;
- DIR *dirp;
-
- debug("Enter[%d]: %s\n", depth, dir);
- increment_depth();
-
- add_patterns_from_gitignore(dir, dirlen);
-
- dirp = opendir(dir);
- if (!dirp)
- perror_exit(dir);
-
- while (1) {
- struct dirent *d;
- struct stat st;
- char path[PATH_MAX];
- int pathlen;
- bool ignored;
-
- errno = 0;
- d = readdir(dirp);
- if (!d) {
- if (errno)
- perror_exit(dir);
- break;
- }
-
- if (!strcmp(d->d_name, "..") || !strcmp(d->d_name, "."))
- continue;
-
- pathlen = snprintf(path, sizeof(path), "%s/%s", dir, d->d_name);
- if (pathlen >= sizeof(path))
- error_exit("%s: too long path was truncated\n", path);
-
- if (lstat(path, &st) < 0)
- perror_exit(path);
-
- if ((!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) ||
- is_ignored(path, pathlen, dirlen, S_ISDIR(st.st_mode))) {
- ignored = true;
- } else {
- if (S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
- // If all the files in a directory are ignored,
- // let's ignore that directory as well. This
- // will avoid empty directories in the tarball.
- ignored = traverse_directory(path, pathlen);
- else
- ignored = false;
- }
-
- if (ignored) {
- print_path(path);
- } else {
- print_stat(path, &st);
- all_ignored = false;
- }
- }
-
- if (closedir(dirp))
- perror_exit(dir);
-
- decrement_depth();
- debug("Leave[%d]: %s\n", depth, dir);
-
- return all_ignored;
-}
-
-static void usage(void)
-{
- fprintf(stderr,
- "usage: %s [options]\n"
- "\n"
- "Show files that are ignored by git\n"
- "\n"
- "options:\n"
- " -d, --debug print debug messages to stderr\n"
- " -e, --exclude PATTERN add the given exclude pattern\n"
- " -h, --help show this help message and exit\n"
- " -i, --ignore-case Ignore case differences between the patterns and the files\n"
- " -o, --output FILE output the ignored files to a file (default: '-', i.e. stdout)\n"
- " -p, --prefix PREFIX prefix added to each path (default: empty string)\n"
- " -r, --rootdir DIR root of the source tree (default: current working directory)\n"
- " -s, --stat FILE output the file stat of non-ignored files to a file\n",
- progname);
-}
-
-static void open_output(const char *pathname, FILE **fp)
-{
- if (strcmp(pathname, "-")) {
- *fp = fopen(pathname, "w");
- if (!*fp)
- perror_exit(pathname);
- } else {
- *fp = stdout;
- }
-}
-
-static void close_output(const char *pathname, FILE *fp)
-{
- fflush(fp);
-
- if (ferror(fp))
- error_exit("not all data was written to the output\n");
-
- if (fclose(fp))
- perror_exit(pathname);
-}
-
-int main(int argc, char *argv[])
-{
- const char *output = "-";
- const char *rootdir = ".";
- const char *stat = NULL;
-
- progname = strrchr(argv[0], '/');
- if (progname)
- progname++;
- else
- progname = argv[0];
-
- while (1) {
- static struct option long_options[] = {
- {"debug", no_argument, NULL, 'd'},
- {"help", no_argument, NULL, 'h'},
- {"ignore-case", no_argument, NULL, 'i'},
- {"output", required_argument, NULL, 'o'},
- {"prefix", required_argument, NULL, 'p'},
- {"rootdir", required_argument, NULL, 'r'},
- {"stat", required_argument, NULL, 's'},
- {"exclude", required_argument, NULL, 'x'},
- {},
- };
-
- int c = getopt_long(argc, argv, "dhino:p:r:s:x:", long_options, NULL);
-
- if (c == -1)
- break;
-
- switch (c) {
- case 'd':
- debug_on = true;
- break;
- case 'h':
- usage();
- exit(0);
- case 'i':
- ignore_case = true;
- break;
- case 'o':
- output = optarg;
- break;
- case 'p':
- prefix = optarg;
- break;
- case 'r':
- rootdir = optarg;
- break;
- case 's':
- stat = optarg;
- break;
- case 'x':
- add_pattern(optarg, ".", strlen("."));
- break;
- case '?':
- usage();
- /* fallthrough */
- default:
- exit(EXIT_FAILURE);
- }
- }
-
- open_output(output, &out_fp);
- if (stat && stat[0])
- open_output(stat, &stat_fp);
-
- if (chdir(rootdir))
- perror_exit(rootdir);
-
- add_pattern(".git/", ".", strlen("."));
-
- if (traverse_directory(".", strlen(".")))
- print_path("./");
-
- assert(depth == 0);
-
- while (nr_patterns > 0)
- free(pattern_list[--nr_patterns]);
- free(pattern_list);
- free(nr_patterns_at);
-
- close_output(output, out_fp);
- if (stat_fp)
- close_output(stat, stat_fp);
-
- return 0;
-}
dpkg-deb $dpkg_deb_opts ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" ..
}
-deploy_kernel_headers () {
+install_linux_image () {
+ pdir=$1
+ pname=$2
+
+ rm -rf ${pdir}
+
+ # Only some architectures with OF support have this target
+ if is_enabled CONFIG_OF_EARLY_FLATTREE && [ -d "${srctree}/arch/${SRCARCH}/boot/dts" ]; then
+ ${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
+ fi
+
+ if is_enabled CONFIG_MODULES; then
+ ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
+ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
+ if [ "${SRCARCH}" = um ] ; then
+ mkdir -p "${pdir}/usr/lib/uml/modules"
+ mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
+ fi
+ fi
+
+ # Install the kernel
+ if [ "${ARCH}" = um ] ; then
+ mkdir -p "${pdir}/usr/bin" "${pdir}/usr/share/doc/${pname}"
+ cp System.map "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}/System.map"
+ cp ${KCONFIG_CONFIG} "${pdir}/usr/share/doc/${pname}/config"
+ gzip "${pdir}/usr/share/doc/${pname}/config"
+ else
+ mkdir -p "${pdir}/boot"
+ cp System.map "${pdir}/boot/System.map-${KERNELRELEASE}"
+ cp ${KCONFIG_CONFIG} "${pdir}/boot/config-${KERNELRELEASE}"
+ fi
+
+ # Not all arches have the same installed path in debian
+ # XXX: have each arch Makefile export a variable of the canonical image install
+ # path instead
+ case "${SRCARCH}" in
+ um)
+ installed_image_path="usr/bin/linux-${KERNELRELEASE}";;
+ parisc|mips|powerpc)
+ installed_image_path="boot/vmlinux-${KERNELRELEASE}";;
+ *)
+ installed_image_path="boot/vmlinuz-${KERNELRELEASE}";;
+ esac
+ cp "$(${MAKE} -s -f ${srctree}/Makefile image_name)" "${pdir}/${installed_image_path}"
+
+ # Install the maintainer scripts
+ # Note: hook scripts under /etc/kernel are also executed by official Debian
+ # kernel packages, as well as kernel packages built using make-kpkg.
+ # make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
+ # so do we; recent versions of dracut and initramfs-tools will obey this.
+ debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
+ for script in postinst postrm preinst prerm; do
+ mkdir -p "${pdir}${debhookdir}/${script}.d"
+
+ mkdir -p "${pdir}/DEBIAN"
+ cat <<-EOF > "${pdir}/DEBIAN/${script}"
+ #!/bin/sh
+
+ set -e
+
+ # Pass maintainer script parameters to hook scripts
+ export DEB_MAINT_PARAMS="\$*"
+
+ # Tell initramfs builder whether it's wanted
+ export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
+
+ test -d ${debhookdir}/${script}.d && run-parts --arg="${KERNELRELEASE}" --arg="/${installed_image_path}" ${debhookdir}/${script}.d
+ exit 0
+ EOF
+ chmod 755 "${pdir}/DEBIAN/${script}"
+ done
+}
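The generated maintainer scripts above delegate the real work to run-parts: every executable under ${debhookdir}/${script}.d is invoked with the kernel release and the installed image path as arguments. A minimal sketch of such a hook, assuming a hypothetical path /etc/kernel/postinst.d/zz-symlink (not part of this patch):

	#!/bin/sh
	# $1 = kernel release, $2 = absolute path of the installed image.
	# DEB_MAINT_PARAMS and INITRD are exported by the caller (see above).
	set -e
	image="$2"
	# Keep /boot/vmlinuz pointing at the most recently installed kernel.
	ln -sf "${image#/boot/}" /boot/vmlinuz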
+
+install_linux_image_dbg () {
+ pdir=$1
+ image_pdir=$2
+
+ rm -rf ${pdir}
+
+	for module in $(find ${image_pdir}/lib/modules/ -name '*.ko' -printf '%P\n'); do
+ module=lib/modules/${module}
+ mkdir -p $(dirname ${pdir}/usr/lib/debug/${module})
+ # only keep debug symbols in the debug file
+ ${OBJCOPY} --only-keep-debug ${image_pdir}/${module} ${pdir}/usr/lib/debug/${module}
+ # strip original module from debug symbols
+ ${OBJCOPY} --strip-debug ${image_pdir}/${module}
+ # then add a link to those
+ ${OBJCOPY} --add-gnu-debuglink=${pdir}/usr/lib/debug/${module} ${image_pdir}/${module}
+ done
+
+ # re-sign stripped modules
+ if is_enabled CONFIG_MODULE_SIG_ALL; then
+ ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${image_pdir}" modules_sign
+ fi
+
+ # Build debug package
+ # Different tools want the image in different locations
+ # perf
+ mkdir -p ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/
+ cp vmlinux ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/
+ # systemtap
+ mkdir -p ${pdir}/usr/lib/debug/boot/
+ ln -s ../lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/boot/vmlinux-${KERNELRELEASE}
+ # kdump-tools
+ ln -s lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/vmlinux-${KERNELRELEASE}
+}
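The three ${OBJCOPY} steps above are the standard GNU debuglink split, and the same sequence works on any ELF object. A standalone sketch with hypothetical file names:

	cc -g -c foo.c -o foo.o                      # build with debug info
	objcopy --only-keep-debug foo.o foo.debug    # copy the debug sections out
	objcopy --strip-debug foo.o                  # drop them from the original
	objcopy --add-gnu-debuglink=foo.debug foo.o  # record where gdb can find them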
+
+install_kernel_headers () {
pdir=$1
rm -rf $pdir
ln -s /usr/src/linux-headers-$version $pdir/lib/modules/$version/build
}
-deploy_libc_headers () {
+install_libc_headers () {
pdir=$1
rm -rf $pdir
mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/
}
-version=$KERNELRELEASE
-tmpdir=debian/linux-image
-dbg_dir=debian/linux-image-dbg
-packagename=linux-image-$version
-dbg_packagename=$packagename-dbg
-
-if [ "$ARCH" = "um" ] ; then
- packagename=user-mode-linux-$version
-fi
-
-# Not all arches have the same installed path in debian
-# XXX: have each arch Makefile export a variable of the canonical image install
-# path instead
-case $ARCH in
-um)
- installed_image_path="usr/bin/linux-$version"
- ;;
-parisc|mips|powerpc)
- installed_image_path="boot/vmlinux-$version"
- ;;
-*)
- installed_image_path="boot/vmlinuz-$version"
-esac
-
-BUILD_DEBUG=$(if_enabled_echo CONFIG_DEBUG_INFO Yes)
-
-# Setup the directory structure
-rm -rf "$tmpdir" "$dbg_dir" debian/files
-mkdir -m 755 -p "$tmpdir/DEBIAN"
-mkdir -p "$tmpdir/lib" "$tmpdir/boot"
-
-# Install the kernel
-if [ "$ARCH" = "um" ] ; then
- mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" "$tmpdir/usr/share/doc/$packagename"
- cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
- cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
- gzip "$tmpdir/usr/share/doc/$packagename/config"
-else
- cp System.map "$tmpdir/boot/System.map-$version"
- cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
-fi
-cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
-
-if is_enabled CONFIG_OF_EARLY_FLATTREE; then
- # Only some architectures with OF support have this target
- if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
- $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
- fi
-fi
-
-if is_enabled CONFIG_MODULES; then
- INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install
- rm -f "$tmpdir/lib/modules/$version/build"
- rm -f "$tmpdir/lib/modules/$version/source"
- if [ "$ARCH" = "um" ] ; then
- mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
- rmdir "$tmpdir/lib/modules/$version"
- fi
- if [ -n "$BUILD_DEBUG" ] ; then
- for module in $(find $tmpdir/lib/modules/ -name *.ko -printf '%P\n'); do
- module=lib/modules/$module
- mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
- # only keep debug symbols in the debug file
- $OBJCOPY --only-keep-debug $tmpdir/$module $dbg_dir/usr/lib/debug/$module
- # strip original module from debug symbols
- $OBJCOPY --strip-debug $tmpdir/$module
- # then add a link to those
- $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $tmpdir/$module
- done
-
- # resign stripped modules
- if is_enabled CONFIG_MODULE_SIG_ALL; then
- INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign
- fi
- fi
-fi
-
-# Install the maintainer scripts
-# Note: hook scripts under /etc/kernel are also executed by official Debian
-# kernel packages, as well as kernel packages built using make-kpkg.
-# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
-# so do we; recent versions of dracut and initramfs-tools will obey this.
-debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
-for script in postinst postrm preinst prerm ; do
- mkdir -p "$tmpdir$debhookdir/$script.d"
- cat <<EOF > "$tmpdir/DEBIAN/$script"
-#!/bin/sh
-
-set -e
-
-# Pass maintainer script parameters to hook scripts
-export DEB_MAINT_PARAMS="\$*"
-
-# Tell initramfs builder whether it's wanted
-export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
-
-test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
-exit 0
-EOF
- chmod 755 "$tmpdir/DEBIAN/$script"
+rm -f debian/files
+
+packages_enabled=$(dh_listpackages)
+
+for package in ${packages_enabled}
+do
+ case ${package} in
+ *-dbg)
+		# This must be done after linux-image, that is, we expect the
+		# debug package to appear after linux-image in debian/control.
+ install_linux_image_dbg debian/linux-image-dbg debian/linux-image;;
+ linux-image-*|user-mode-linux-*)
+ install_linux_image debian/linux-image ${package};;
+ linux-libc-dev)
+ install_libc_headers debian/linux-libc-dev;;
+ linux-headers-*)
+ install_kernel_headers debian/linux-headers;;
+ esac
done
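dh_listpackages prints the binary packages declared in debian/control, one per line, so the loop above typically iterates over something like this (hypothetical 6.3.0 version):

	linux-image-6.3.0
	linux-libc-dev
	linux-headers-6.3.0
	linux-image-6.3.0-dbg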
-if [ "$ARCH" != "um" ]; then
- if is_enabled CONFIG_MODULES; then
- deploy_kernel_headers debian/linux-headers
- create_package linux-headers-$version debian/linux-headers
- fi
-
- deploy_libc_headers debian/linux-libc-dev
- create_package linux-libc-dev debian/linux-libc-dev
-fi
-
-create_package "$packagename" "$tmpdir"
-
-if [ -n "$BUILD_DEBUG" ] ; then
- # Build debug package
- # Different tools want the image in different locations
- # perf
- mkdir -p $dbg_dir/usr/lib/debug/lib/modules/$version/
- cp vmlinux $dbg_dir/usr/lib/debug/lib/modules/$version/
- # systemtap
- mkdir -p $dbg_dir/usr/lib/debug/boot/
- ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version
- # kdump-tools
- ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version
- create_package "$dbg_packagename" "$dbg_dir"
-fi
+for package in ${packages_enabled}
+do
+ case ${package} in
+ *-dbg)
+ create_package ${package} debian/linux-image-dbg;;
+ linux-image-*|user-mode-linux-*)
+ create_package ${package} debian/linux-image;;
+ linux-libc-dev)
+ create_package ${package} debian/linux-libc-dev;;
+ linux-headers-*)
+ create_package ${package} debian/linux-headers;;
+ esac
+done
exit 0
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
-# Set up CROSS_COMPILE if we are cross-compiling, but not called from the
-# kernel toplevel Makefile
-if [ -z "${CROSS_COMPILE}${cross_compiling}" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
+# Set up CROSS_COMPILE if not defined yet
+if [ "${CROSS_COMPILE+set}" != "set" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
echo CROSS_COMPILE=${DEB_HOST_GNU_TYPE}-
fi
version=$(dpkg-parsechangelog -S Version)
-version_upstream="${version%-*}"
-debian_revision="${version#${version_upstream}}"
-debian_revision="${debian_revision#*-}"
+debian_revision="${version##*-}"
-echo KERNELRELEASE=${version_upstream}
-echo KBUILD_BUILD_VERSION=${debian_revision}
+if [ "${version}" != "${debian_revision}" ]; then
+ echo KBUILD_BUILD_VERSION=${debian_revision}
+fi
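The ${version##*-} expansion deletes everything up to and including the last hyphen; when the version carries no hyphen it expands to the whole string, which is exactly what the inequality test catches. A quick sketch with hypothetical versions:

	version=6.3.0-2; echo "${version##*-}"   # prints "2"     -> KBUILD_BUILD_VERSION=2
	version=6.3.0;   echo "${version##*-}"   # prints "6.3.0" -> unchanged, nothing emitted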
--- /dev/null
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+diff_patch="${1}"
+untracked_patch="${2}"
+srctree=$(dirname $0)/../..
+
+rm -f ${diff_patch} ${untracked_patch}
+
+if ! ${srctree}/scripts/check-git; then
+ exit
+fi
+
+mkdir -p "$(dirname ${diff_patch})" "$(dirname ${untracked_patch})"
+
+git -C "${srctree}" diff HEAD > "${diff_patch}"
+
+if [ ! -s "${diff_patch}" ]; then
+ rm -f "${diff_patch}"
+ exit
+fi
+
+git -C ${srctree} status --porcelain --untracked-files=all |
+while read stat path
+do
+ if [ "${stat}" = '??' ]; then
+
+ if ! diff -u /dev/null "${srctree}/${path}" > .tmp_diff &&
+ ! head -n1 .tmp_diff | grep -q "Binary files"; then
+ {
+ echo "--- /dev/null"
+ echo "+++ linux/$path"
+				tail -n +3 .tmp_diff
+ } >> ${untracked_patch}
+ fi
+ fi
+done
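For reference, git status --porcelain --untracked-files=all marks untracked files with a "??" status, which the read above splits into ${stat} and ${path}; only those entries become /dev/null-based hunks. Hypothetical output:

	$ git status --porcelain --untracked-files=all
	 M Makefile
	?? scripts/mytool.sh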
+
+rm -f .tmp_diff
+
+if [ ! -s "${untracked_patch}" ]; then
+	rm -f "${untracked_patch}"
+ exit
+fi
if [ -n "$KDEB_PKGVERSION" ]; then
packageversion=$KDEB_PKGVERSION
else
- packageversion=$version-$($srctree/init/build-version)
+ packageversion=$(${srctree}/scripts/setlocalversion --no-local ${srctree})-$($srctree/init/build-version)
fi
sourcename=${KDEB_SOURCENAME:-linux-upstream}
} > debian/patches/config
echo config > debian/patches/series
+$(dirname $0)/gen-diff-patch debian/patches/diff.patch debian/patches/untracked.patch
+if [ -f debian/patches/diff.patch ]; then
+ echo diff.patch >> debian/patches/series
+fi
+if [ -f debian/patches/untracked.patch ]; then
+ echo untracked.patch >> debian/patches/series
+fi
+
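With both patches generated, debian/patches/series ends up listing, in the order dpkg-source's "3.0 (quilt)" format applies them:

	config
	diff.patch
	untracked.patch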
echo $debarch > debian/arch
extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
Priority: optional
Maintainer: $maintainer
Rules-Requires-Root: no
-Build-Depends: bc, rsync, kmod, cpio, bison, flex $extra_build_depends
+Build-Depends: bc, debhelper, rsync, kmod, cpio, bison, flex $extra_build_depends
Homepage: https://www.kernel.org/
Package: $packagename-$version
Description: Linux kernel, version $version
This package contains the Linux kernel, modules and corresponding other
files, version: $version.
+EOF
+
+if [ "${SRCARCH}" != um ]; then
+cat <<EOF >> debian/control
Package: linux-libc-dev
Section: devel
This is useful for people who need to build external modules
EOF
fi
+fi
if is_enabled CONFIG_DEBUG_INFO; then
cat <<EOF >> debian/control
#!$(command -v $MAKE) -f
srctree ?= .
+KERNELRELEASE = ${KERNELRELEASE}
build-indep:
build-arch:
\$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
+ KERNELRELEASE=\$(KERNELRELEASE) \
\$(shell \$(srctree)/scripts/package/deb-build-option) \
olddefconfig all
binary-indep:
binary-arch: build-arch
- \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} intdeb-pkg
+ \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
+ KERNELRELEASE=\$(KERNELRELEASE) intdeb-pkg
+
clean:
rm -rf debian/files debian/linux-*
\$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} clean
MAKE="$MAKE -f $srctree/Makefile"
else
S=
+
+ mkdir -p rpmbuild/SOURCES
+ cp linux.tar.gz rpmbuild/SOURCES
+ cp "${KCONFIG_CONFIG}" rpmbuild/SOURCES/config
+ $(dirname $0)/gen-diff-patch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
+ touch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
fi
-if grep -q CONFIG_MODULES=y .config; then
+if grep -q CONFIG_MODULES=y include/config/auto.conf; then
M=
else
M=DEL
fi
-if grep -q CONFIG_DRM=y .config; then
+if grep -q CONFIG_DRM=y include/config/auto.conf; then
PROVIDES=kernel-drm
fi
Vendor: The Linux Community
URL: https://www.kernel.org
$S Source0: linux.tar.gz
-$S Source1: .config
+$S Source1: config
+$S Source2: diff.patch
+$S Source3: untracked.patch
Provides: $PROVIDES
$S BuildRequires: bc binutils bison dwarves
$S BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
$S$M
$S %prep
$S %setup -q -n linux
-$S cp %{SOURCE1} .
+$S cp %{SOURCE1} .config
+$S if [ -s %{SOURCE2} ]; then
+$S patch -p1 < %{SOURCE2}
+$S fi
+$S if [ -s %{SOURCE3} ]; then
+$S patch -p1 < %{SOURCE3}
+$S fi
$S
$S %build
$S $MAKE %{?_smp_mflags} KERNELRELEASE=$KERNELRELEASE KBUILD_BUILD_VERSION=%{release}
#
usage() {
- echo "Usage: $0 [srctree]" >&2
+ echo "Usage: $0 [--no-local] [srctree]" >&2
exit 1
}
+no_local=false
+if test "$1" = "--no-local"; then
+ no_local=true
+ shift
+fi
+
srctree=.
if test $# -gt 0; then
srctree=$1
scm_version()
{
- local short
+ local short=false
+ local no_dirty=false
local tag
- short=false
+
+ while [ $# -gt 0 ];
+ do
+ case "$1" in
+ --short)
+ short=true;;
+ --no-dirty)
+ no_dirty=true;;
+ esac
+ shift
+ done
cd "$srctree"
- if test "$1" = "--short"; then
- short=true
- fi
if test -n "$(git rev-parse --show-cdup 2>/dev/null)"; then
return
printf '%s%s' -g "$(echo $head | cut -c1-12)"
fi
+ if ${no_dirty}; then
+ return
+ fi
+
# Check for uncommitted changes.
# This script must avoid any write attempt to the source tree, which
# might be read-only.
echo "$res"
}
-if ! test -e include/config/auto.conf; then
- echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
- exit 1
-fi
-
if [ -z "${KERNELVERSION}" ]; then
echo "KERNELVERSION is not set" >&2
exit 1
file_localversion="${file_localversion}$(collect_files "$srctree"/localversion*)"
fi
+if ${no_local}; then
+ echo "${KERNELVERSION}$(scm_version --no-dirty)"
+ exit 0
+fi
+
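This early return is what lets --no-local run from a clean, unconfigured tree: it prints the base release before the include/config/auto.conf check below is reached. A usage sketch with a hypothetical version and commit hash:

	$ KERNELVERSION=6.3.0 ./scripts/setlocalversion --no-local .
	6.3.0-g1a2b3c4d5e6f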
+if ! test -e include/config/auto.conf; then
+ echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
+ exit 1
+fi
+
# version string from CONFIG_LOCALVERSION
config_localversion=$(sed -n 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/p' include/config/auto.conf)
#ifdef CONFIG_KEYS_REQUEST_CACHE
struct task_struct *t = current;
- key_put(t->cached_requested_key);
- t->cached_requested_key = key_get(key);
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ /* Do not cache key if it is a kernel thread */
+ if (!(t->flags & PF_KTHREAD)) {
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
#endif
}
},
#endif
+/* Meteor Lake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE)
+ /* Meteorlake-P */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x7e28,
+ },
+#endif
+
};
static const struct config_entry *snd_intel_dsp_find_config
pao = hpi_find_adapter(phm->adapter_index);
} else {
/* subsys messages don't address an adapter */
- _HPI_6205(NULL, phm, phr);
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
return;
}
#define needs_eld_notify_link(chip) false
#endif
-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) && \
+ (((pci)->device == 0x0a0c) || \
((pci)->device == 0x0c0c) || \
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c) || \
((pci)->device == 0x490d) || \
((pci)->device == 0x4f90) || \
((pci)->device == 0x4f91) || \
- ((pci)->device == 0x4f92))
+ ((pci)->device == 0x4f92)))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
for (i = 0; i < TUNING_CTLS_COUNT; i++)
if (nid == ca0132_tuning_ctls[i].nid)
- break;
+ goto found;
+ return -EINVAL;
+found:
snd_hda_power_up(codec);
dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
ca0132_tuning_ctls[i].req,
SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work);
INIT_WORK(&da7219_aad->jack_det_work, da7219_aad_jack_det_work);
+ mutex_init(&da7219_aad->jack_det_mutex);
+
ret = request_threaded_irq(da7219_aad->irq, da7219_aad_pre_irq_thread,
da7219_aad_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
{
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ bool has_capture = !hcp->hcd.no_i2s_capture;
+ bool has_playback = !hcp->hcd.no_i2s_playback;
int ret = 0;
+ if (!((has_playback && tx) || (has_capture && !tx)))
+ return 0;
+
mutex_lock(&hcp->lock);
if (hcp->busy) {
dev_err(dai->dev, "Only one simultaneous stream supported!\n");
struct snd_soc_dai *dai)
{
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ bool has_capture = !hcp->hcd.no_i2s_capture;
+ bool has_playback = !hcp->hcd.no_i2s_playback;
+
+ if (!((has_playback && tx) || (has_capture && !tx)))
+ return;
hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
struct tx_mute_work {
struct tx_macro *tx;
- u32 decimator;
+ u8 decimator;
struct delayed_work dwork;
};
return 0;
}
-static bool is_amic_enabled(struct snd_soc_component *component, int decimator)
+static bool is_amic_enabled(struct snd_soc_component *component, u8 decimator)
{
u16 adc_mux_reg, adc_reg, adc_n;
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- unsigned int decimator;
+ u8 decimator;
u16 tx_vol_ctl_reg, dec_cfg_reg, hpf_gate_reg, tx_gain_ctl_reg;
u8 hpf_cut_off_freq;
int hpf_delay = TX_MACRO_DMIC_HPF_DELAY_MS;
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
- u32 decimator, sample_rate;
+ u32 sample_rate;
+ u8 decimator;
int tx_fs_rate;
struct tx_macro *tx = snd_soc_component_get_drvdata(component);
{
struct snd_soc_component *component = dai->component;
struct tx_macro *tx = snd_soc_component_get_drvdata(component);
- u16 decimator;
+ u8 decimator;
/* active decimator not set yet */
if (tx->active_decimator[dai->id] == -1)
Say Y if you want to add support for SoC audio on an i.MX board with
a sgtl5000 codec.
+ Note that this is an old driver. Consider enabling
+ SND_SOC_FSL_ASOC_CARD and SND_SOC_SGTL5000 to use the newer
+ driver.
+
config SND_SOC_IMX_SPDIF
tristate "SoC Audio support for i.MX boards with S/PDIF"
select SND_SOC_IMX_PCM_DMA
snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL);
}
+static int
+avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
struct snd_soc_dai_link **dai_link)
{
dl->num_platforms = 1;
dl->id = 0;
dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->be_hw_params_fixup = avs_da7219_be_fixup;
dl->init = avs_da7219_codec_init;
dl->exit = avs_da7219_codec_exit;
dl->nonatomic = 1;
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-dapm.h>
{ "Spk", NULL, "Speaker" },
};
+static int
+avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 16 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ return 0;
+}
+
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
struct snd_soc_dai_link **dai_link)
{
dl->num_platforms = 1;
dl->id = 0;
dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->be_hw_params_fixup = avs_max98357a_be_fixup;
dl->nonatomic = 1;
dl->no_pcm = 1;
dl->dpcm_playback = 1;
return -EINVAL;
}
- if (!SND_SOC_DAPM_EVENT_ON(event)) {
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
+ SND_SOC_CLOCK_IN);
+ else
ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
- if (ret < 0) {
- dev_err(card->dev, "set sysclk err = %d\n", ret);
- return ret;
- }
- }
+ if (ret < 0)
+ dev_err(card->dev, "Set sysclk failed: %d\n", ret);
- return 0;
+ return ret;
}
static const struct snd_kcontrol_new card_controls[] = {
.hw_params = avs_rt5682_hw_params,
};
+static int
+avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSPN to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
struct snd_soc_dai_link **dai_link)
{
dl->id = 0;
dl->init = avs_rt5682_codec_init;
dl->exit = avs_rt5682_codec_exit;
+ dl->be_hw_params_fixup = avs_rt5682_be_fixup;
dl->ops = &avs_rt5682_ops;
dl->nonatomic = 1;
dl->no_pcm = 1;
#include <sound/soc-acpi.h>
#include "../../../codecs/nau8825.h"
-#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_SSM_CODEC_DAI "ssm4567-hifi"
static struct snd_soc_codec_conf card_codec_conf[] = {
SOC_DAPM_PIN_SWITCH("Right Speaker"),
};
-static int
-platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
-{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct snd_soc_dai *codec_dai;
- int ret;
-
- codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
- if (!codec_dai) {
- dev_err(card->dev, "Codec dai not found\n");
- return -EINVAL;
- }
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
- SND_SOC_CLOCK_IN);
- if (ret < 0)
- dev_err(card->dev, "set sysclk err = %d\n", ret);
- } else {
- ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
- if (ret < 0)
- dev_err(card->dev, "set sysclk err = %d\n", ret);
- }
-
- return ret;
-}
-
static const struct snd_soc_dapm_widget card_widgets[] = {
SND_SOC_DAPM_SPK("Left Speaker", NULL),
SND_SOC_DAPM_SPK("Right Speaker", NULL),
SND_SOC_DAPM_SPK("DP1", NULL),
SND_SOC_DAPM_SPK("DP2", NULL),
- SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route card_base_routes[] = {
{
.comp_ids = &essx_83x6,
.drv_name = "sof-essx8336",
- .sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */
+ .sof_tplg_filename = "sof-adl-es8336", /* the tplg suffix is added at run time */
.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
unsigned int freq)
{
if (freq)
- return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+ return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
- return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+ return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
}
EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
goto err;
}
+ usleep_range(500, 1000);
+
/* exit HDA controller reset */
ret = hda_dsp_ctrl_link_reset(sdev, false);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
goto err;
}
+ usleep_range(1000, 1200);
hda_codec_detect_mask(sdev);
snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
SOF_HDA_VS_D0I3C_I3, value);
+ /*
+ * The value written to the D0I3C::I3 bit may not be taken into account immediately.
+ * A delay is recommended before checking if D0I3C::CIP is cleared
+ */
+ usleep_range(30, 40);
+
/* Wait for cmd in progress to be cleared before exiting the function */
ret = hda_dsp_wait_d0i3c_done(sdev);
if (ret < 0) {
}
reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
+ /* Confirm d0i3 state changed with paranoia check */
+ if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
+ dev_err(sdev->dev, "failed to update D0I3C!\n");
+ return -EIO;
+ }
+
trace_sof_intel_D0I3C_updated(sdev, reg);
return 0;
.nocodec_tplg_filename = "sof-glk-nocodec.tplg",
.ops = &sof_apl_ops,
.ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc cfl_desc = {
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
.nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
.ops = &sof_cnl_ops,
.ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
.nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
.ops = &sof_mtl_ops,
.ops_init = sof_mtl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
.nocodec_tplg_filename = "sof-skl-nocodec.tplg",
.ops = &sof_skl_ops,
.ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
};
static struct sof_dev_desc kbl_desc = {
.nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
.ops = &sof_skl_ops,
.ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc tglh_desc = {
.nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc adls_desc = {
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc adl_desc = {
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc adl_n_desc = {
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc rpls_desc = {
.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc rpl_desc = {
.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
/* PCI IDs */
/* LPE base */
base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
- size = pci_resource_len(pci, desc->resindex_lpe_base);
- if (size < PCI_BAR_SIZE) {
- dev_err(sdev->dev, "error: I/O region is too small.\n");
- return -ENODEV;
- }
+ size = PCI_BAR_SIZE;
dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
break;
case SOF_DAI_INTEL_ALH:
if (data) {
- config->dai_index = data->dai_index;
+ /* save the dai_index during hw_params and reuse it for hw_free */
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+ config->dai_index = data->dai_index;
config->alh.stream_id = data->dai_data;
}
break;
break;
}
- config->flags = flags;
+ /*
+ * The dai_config op is invoked several times and the flags argument varies as below:
+ * BE DAI hw_params: When the op is invoked during the BE DAI hw_params, flags contains
+ * SOF_DAI_CONFIG_FLAGS_HW_PARAMS along with quirks
+ * FE DAI hw_params: When invoked during FE DAI hw_params after the DAI widget has
+ * just been set up in the DSP, flags is set to SOF_DAI_CONFIG_FLAGS_HW_PARAMS with no
+ * quirks
+ * BE DAI trigger: When invoked during the BE DAI trigger, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_PAUSE and contains no quirks
+ * BE DAI hw_free: When invoked during the BE DAI hw_free, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+ * FE DAI hw_free: When invoked during the FE DAI hw_free, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+ *
+ * The DAI_CONFIG IPC is sent to the DSP, only after the widget is set up during the FE
+ * DAI hw_params. But since the BE DAI hw_params precedes the FE DAI hw_params, the quirks
+ * need to be preserved when assigning the flags before sending the IPC.
+ * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is.
+ */
+
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+ config->flags |= flags;
+ else
+ config->flags = flags;
/* only send the IPC if the widget is set up in the DSP */
if (swidget->use_count > 0) {
&reply, sizeof(reply));
if (ret < 0)
dev_err(sdev->dev, "Failed to set dai config for %s\n", dai->name);
+
+ /* clear the flags once the IPC has been sent even if it fails */
+ config->flags = SOF_DAI_CONFIG_FLAGS_NONE;
}
return ret;
return;
}
- if (hdr.size < sizeof(hdr)) {
- dev_err(sdev->dev, "The received message size is invalid\n");
+ if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
+ dev_err(sdev->dev, "The received message size is invalid: %u\n",
+ hdr.size);
return;
}
}
/* set curve type and duration from topology */
- data.curve_duration = gain->data.curve_duration;
+ data.curve_duration_l = gain->data.curve_duration_l;
+ data.curve_duration_h = gain->data.curve_duration_h;
data.curve_type = gain->data.curve_type;
msg->data_ptr = &data;
get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
{SOF_TKN_GAIN_RAMP_DURATION,
SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
- offsetof(struct sof_ipc4_gain_data, curve_duration)},
+ offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
{SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
};
for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) {
fmt = ptr;
dev_dbg(dev,
- " #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
+ " #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg);
}
dev_dbg(scomp->dev,
"pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n",
- swidget->widget->name, gain->data.curve_type, gain->data.curve_duration,
+ swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
gain->data.init_val, gain->base_config.cpc);
ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
ipc4_copier = dai->private;
if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+ struct sof_ipc4_copier_data *copier_data = &ipc4_copier->data;
struct sof_ipc4_alh_configuration_blob *blob;
unsigned int group_id;
ALH_MULTI_GTW_BASE;
ida_free(&alh_group_ida, group_id);
}
+
+ /* clear the node ID */
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
}
}
pipeline->skip_during_fe_trigger = true;
fallthrough;
case SOF_DAI_INTEL_ALH:
- copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
- copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+ /*
+ * Do not clear the node ID when this op is invoked with
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE. It is needed to free the group_ida during
+ * unprepare.
+ */
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+ }
break;
case SOF_DAI_INTEL_DMIC:
case SOF_DAI_INTEL_SSP:
#define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
/* Node ID for DMIC type DAI copiers */
-#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
+#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
#define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
#define SOF_IPC4_VOL_ZERO_DB 0x7fffffff
* @init_val: Initial value
* @curve_type: Curve type
* @reserved: reserved for future use
- * @curve_duration: Curve duration
+ * @curve_duration_l: Curve duration low part
+ * @curve_duration_h: Curve duration high part
*/
struct sof_ipc4_gain_data {
uint32_t channels;
uint32_t init_val;
uint32_t curve_type;
uint32_t reserved;
- uint32_t curve_duration;
+ uint32_t curve_duration_l;
+ uint32_t curve_duration_h;
} __aligned(8);
/**
/* reset route setup status for all routes that contain this widget */
sof_reset_route_setup_status(sdev, swidget);
+ /* free DAI config and continue to free widget even if it fails */
+ if (WIDGET_IS_DAI(swidget->id)) {
+ struct snd_sof_dai_config_data data;
+ unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_FREE;
+
+ data.dai_data = DMA_CHAN_INVALID;
+
+ if (tplg_ops && tplg_ops->dai_config) {
+ err = tplg_ops->dai_config(sdev, swidget, flags, &data);
+ if (err < 0)
+ dev_err(sdev->dev, "failed to free config for widget %s\n",
+ swidget->widget->name);
+ }
+ }
+
/* continue to disable core even if IPC fails */
- if (tplg_ops && tplg_ops->widget_free)
- err = tplg_ops->widget_free(sdev, swidget);
+ if (tplg_ops && tplg_ops->widget_free) {
+ ret = tplg_ops->widget_free(sdev, swidget);
+ if (ret < 0 && !err)
+ err = ret;
+ }
/*
* disable widget core. continue to route setup status and complete flag
/* send config for DAI components */
if (WIDGET_IS_DAI(swidget->id)) {
- unsigned int flags = SOF_DAI_CONFIG_FLAGS_NONE;
+ unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS;
+ /*
+ * The config flags saved during BE DAI hw_params will be used for IPC3. IPC4 does
+ * not use the flags argument.
+ */
if (tplg_ops && tplg_ops->dai_config) {
ret = tplg_ops->dai_config(sdev, swidget, flags, NULL);
if (ret < 0)
ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
dir, SOF_WIDGET_SETUP);
if (ret < 0) {
- ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
- dir, SOF_WIDGET_UNPREPARE);
+ sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
+ dir, SOF_WIDGET_UNPREPARE);
return ret;
}
if (ret < 0) {
dev_err(scomp->dev, "failed to parse component pin tokens for %s\n",
w->name);
- return ret;
+ goto widget_free;
}
if (swidget->num_sink_pins > SOF_WIDGET_MAX_NUM_PINS ||
swidget->num_source_pins > SOF_WIDGET_MAX_NUM_PINS) {
dev_err(scomp->dev, "invalid pins for %s: [sink: %d, src: %d]\n",
swidget->widget->name, swidget->num_sink_pins, swidget->num_source_pins);
- return -EINVAL;
+ ret = -EINVAL;
+ goto widget_free;
}
if (swidget->num_sink_pins > 1) {
if (ret < 0) {
dev_err(scomp->dev, "failed to parse sink pin binding for %s\n",
w->name);
- return ret;
+ goto widget_free;
}
}
if (ret < 0) {
dev_err(scomp->dev, "failed to parse source pin binding for %s\n",
w->name);
- return ret;
+ goto widget_free;
}
}
case snd_soc_dapm_dai_out:
dai = kzalloc(sizeof(*dai), GFP_KERNEL);
if (!dai) {
- kfree(swidget);
- return -ENOMEM;
-
+ ret = -ENOMEM;
+ goto widget_free;
}
ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size);
tw->shift, swidget->id, tw->name,
strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
? tw->sname : "none");
- kfree(swidget);
- return ret;
+ goto widget_free;
}
if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
if (ret) {
dev_err(scomp->dev, "widget event binding failed for %s\n",
swidget->widget->name);
- kfree(swidget->private);
- kfree(swidget->tuples);
- kfree(swidget);
- return ret;
+ goto free;
}
}
}
spipe = kzalloc(sizeof(*spipe), GFP_KERNEL);
if (!spipe) {
- kfree(swidget->private);
- kfree(swidget->tuples);
- kfree(swidget);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free;
}
spipe->pipe_widget = swidget;
w->dobj.private = swidget;
list_add(&swidget->list, &sdev->widget_list);
return ret;
+free:
+ kfree(swidget->private);
+ kfree(swidget->tuples);
+widget_free:
+ kfree(swidget);
+ return ret;
}
static int sof_route_unload(struct snd_soc_component *scomp,
echo "Max node number check"
-echo -n > $TEMPCONF
-for i in `seq 1 1024` ; do
- echo "node$i" >> $TEMPCONF
-done
+awk '
+BEGIN {
+ for (i = 0; i < 26; i += 1)
+ printf("%c\n", 65 + i % 26)
+ for (i = 26; i < 8192; i += 1)
+ printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
+}
+' > $TEMPCONF
xpass $BOOTCONF -a $TEMPCONF $INITRD
echo "badnode" >> $TEMPCONF
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN uapi header */
NETDEV_XDP_ACT_HW_OFFLOAD = 16,
NETDEV_XDP_ACT_RX_SG = 32,
NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+ NETDEV_XDP_ACT_MASK = 127,
};
enum {
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-/* prevent accidental re-addition of reallocarray()/strlcpy() */
-#pragma GCC poison reallocarray strlcpy
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
#include "libbpf.h"
#include "btf.h"
def get_mask(self):
mask = 0
- idx = self.yaml.get('value-start', 0)
- for _ in self.entries.values():
- mask |= 1 << idx
- idx += 1
+ for e in self.entries.values():
+ mask += e.user_value()
return mask
Attributes:
proto protocol type (e.g. genetlink)
+ license spec license (loaded from an SPDX tag on the spec)
attr_sets dict of attribute sets
msgs dict of all messages (index by name)
"""
def __init__(self, spec_path, schema_path=None):
with open(spec_path, "r") as stream:
+ prefix = '# SPDX-License-Identifier: '
+ first = stream.readline().strip()
+ if not first.startswith(prefix):
+ raise Exception('SPDX license tag required in the spec')
+ self.license = first[len(prefix):]
+
+ stream.seek(0)
spec = yaml.safe_load(stream)
self._resolution_list = []
def resolve(self):
self.resolve_up(super())
- for elem in self.yaml['definitions']:
+ definitions = self.yaml.get('definitions', [])
+ for elem in definitions:
if elem['type'] == 'enum' or elem['type'] == 'flags':
self.consts[elem['name']] = self.new_enum(elem)
else:
if seq is None:
seq = random.randint(1, 1024)
nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
- genlmsg = struct.pack("bbH", genl_cmd, genl_version, 0)
+ genlmsg = struct.pack("BBH", genl_cmd, genl_version, 0)
return nlmsg + genlmsg
self.hdr = nl_msg.raw[0:4]
self.raw = nl_msg.raw[4:]
- self.genl_cmd, self.genl_version, _ = struct.unpack("bbH", self.hdr)
+ self.genl_cmd, self.genl_version, _ = struct.unpack("BBH", self.hdr)
self.raw_attrs = NlAttrs(self.raw)
raw >>= 1
i += 1
else:
- value = enum['entries'][raw - i]
+ value = enum.entries_by_val[raw - i].name
rsp[attr_spec['name']] = value
def _decode(self, attrs, space):
#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
import argparse
import collections
if const.get('render-max', False):
cw.nl()
- max_name = c_upper(name_pfx + 'max')
- cw.p('__' + max_name + ',')
- cw.p(max_name + ' = (__' + max_name + ' - 1)')
+ if const['type'] == 'flags':
+ max_name = c_upper(name_pfx + 'mask')
+ max_val = f' = {enum.get_mask()},'
+ cw.p(max_name + max_val)
+ else:
+ max_name = c_upper(name_pfx + 'max')
+ cw.p('__' + max_name + ',')
+ cw.p(max_name + ' = (__' + max_name + ' - 1)')
cw.block_end(line=';')
cw.nl()
elif const['type'] == 'const':
try:
parsed = Family(args.spec)
+ if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
+ print('Spec license:', parsed.license)
+ print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
+ os.sys.exit(1)
except yaml.YAMLError as exc:
print(exc)
os.sys.exit(1)
cw = CodeWriter(BaseNlLib(), out_file)
_, spec_kernel = find_kernel_root(args.spec)
- if args.mode == 'uapi':
- cw.p('/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */')
+ if args.mode == 'uapi' or args.header:
+ cw.p(f'/* SPDX-License-Identifier: {parsed.license} */')
else:
- if args.header:
- cw.p('/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */')
- else:
- cw.p('// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause')
+ cw.p(f'// SPDX-License-Identifier: {parsed.license}')
cw.p("/* Do not edit directly, auto-generated from: */")
cw.p(f"/*\t{spec_kernel} */")
cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
static void parse_options(int argc, char **argv)
{
int option_index = 0;
- char *pathname;
+ char *pathname, *endptr;
int opt;
pathname = strdup(argv[0]);
log_getinfo = 1;
break;
case 'T':
- log_type = atoi(optarg);
+ log_type = strtol(optarg, &endptr, 0);
+ if (*endptr || (log_type != 0 && log_type != 1)) {
+ printf("Number expected: type(0:execution, 1:history) - Quit.\n");
+ exit(1);
+ }
+
set_log_type = 1;
break;
case 'L':
- log_level = atoi(optarg);
+ log_level = strtol(optarg, &endptr, 0);
+ if (*endptr ||
+ (log_level != 0 && log_level != 1 &&
+ log_level != 2 && log_level != 4)) {
+ printf("Number expected: level(0, 1, 2, 4) - Quit.\n");
+ exit(1);
+ }
+
set_log_level = 1;
break;
case 'R':
if not quiet:
pprint('CAPTURING TRACE')
op = sv.writeDatafileHeader(sv.ftracefile, testdata)
- fp = open(tp+'trace', 'r')
- for line in fp:
- op.write(line)
+ fp = open(tp+'trace', 'rb')
+ op.write(ascii(fp.read()))
op.close()
sv.fsetVal('', 'trace')
sv.platforminfo(cmdafter)
must be run as root.
Alternatively, non-root users can be enabled to run turbostat this way:
-# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep ./turbostat
+# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep path/to/turbostat
# chmod +r /dev/cpu/*/msr
+# chmod +r /dev/cpu_dma_latency
+
.B "turbostat "
reads hardware counters, but doesn't write them.
So it will not interfere with the OS or other programs, including
* turbostat -- show CPU frequency and C-state residency
* on modern Intel and AMD processors.
*
- * Copyright (c) 2022 Intel Corporation.
+ * Copyright (c) 2023 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
/* counter for cpu_num, including user + kernel and all processes */
fd = perf_event_open(&pea, -1, cpu_num, -1, 0);
if (fd == -1) {
- warn("cpu%d: perf instruction counter", cpu_num);
+ warnx("capget(CAP_PERFMON) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
BIC_NOT_PRESENT(BIC_IPC);
}
get_msr(base_cpu, trl_msr_offset, &msr);
fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n",
- base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY" : "", msr);
+ base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY_" : "", msr);
if (has_turbo_ratio_group_limits(family, model)) {
get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
/*
* set_my_sched_priority(pri)
* return previous
- *
- * if non-root, do this:
- * # /sbin/setcap cap_sys_rawio,cap_sys_nice=+ep /usr/bin/turbostat
*/
int set_my_sched_priority(int priority)
{
retval = setpriority(PRIO_PROCESS, 0, priority);
if (retval)
- err(retval, "setpriority(%d)", priority);
+		errx(retval, "capget(CAP_SYS_NICE) failed, try \"# setcap cap_sys_nice=ep %s\"", progname);
errno = 0;
retval = getpriority(PRIO_PROCESS, 0);
fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
"(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
- cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-");
+ cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
return 0;
}
case INTEL_FAM6_ICELAKE_D:
return INTEL_FAM6_ICELAKE_X;
+
+ case INTEL_FAM6_EMERALDRAPIDS_X:
+ return INTEL_FAM6_SAPPHIRERAPIDS_X;
}
return model;
}
fd = open(path, O_RDONLY);
if (fd < 0) {
- warn("fopen %s\n", path);
+ warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
return;
}
retval = read(fd, (void *)&value, sizeof(int));
if (retval != sizeof(int)) {
- warn("read %s\n", path);
+ warn("read failed %s", path);
close(fd);
return;
}
edx_flags = edx;
if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
- warnx("get_msr(UCODE)\n");
+ warnx("get_msr(UCODE)");
/*
* check max extended function levels of CPUID.
void print_version()
{
- fprintf(outf, "turbostat version 2022.10.04 - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2023.03.17 - Len Brown <lenb@kernel.org>\n");
}
#define COMMAND_LINE_SIZE 2048
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-TEST_PROGS := run.sh
-TEST_FILES := basic.sh tbench.sh gitsource.sh
+ifeq (x86,$(ARCH))
+TEST_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
+TEST_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+endif
+
+TEST_PROGS += run.sh
+TEST_FILES += basic.sh tbench.sh gitsource.sh
include ../lib.mk
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "uninit_stack.skel.h"
+
+void test_uninit_stack(void)
+{
+ RUN_TESTS(uninit_stack);
+}
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#define vm_flags vm_start
char _license[] SEC("license") = "GPL";
#include <errno.h>
#include <linux/capability.h>
-struct kernel_cap_struct {
- __u64 val;
-} __attribute__((preserve_access_index));
+typedef struct { unsigned long long val; } kernel_cap_t;
struct cred {
- struct kernel_cap_struct cap_effective;
+ kernel_cap_t cap_effective;
} __attribute__((preserve_access_index));
char _license[] SEC("license") = "GPL";
SEC("lsm.s/userns_create")
int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
{
- struct kernel_cap_struct caps = cred->cap_effective;
- __u64 cap_mask = BIT_LL(CAP_SYS_ADMIN);
+ kernel_cap_t caps = cred->cap_effective;
+ __u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
if (ret)
return 0;
#include "bpf_misc.h"
struct Small {
- int x;
+ long x;
};
struct Big {
- int x;
- int y;
+ long x;
+ long y;
};
__noinline int foo(const struct Big *big)
}
SEC("cgroup_skb/ingress")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid indirect access to stack")
int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Read an uninitialized value from stack at a fixed offset */
+SEC("socket")
+__naked int read_uninit_stack_fixed_off(void *ctx)
+{
+ asm volatile (" \
+ r0 = 0; \
+ /* force stack depth to be 128 */ \
+ *(u64*)(r10 - 128) = r1; \
+ r1 = *(u8 *)(r10 - 8); \
+ r0 += r1; \
+ r1 = *(u8 *)(r10 - 11); \
+ r1 = *(u8 *)(r10 - 13); \
+ r1 = *(u8 *)(r10 - 15); \
+ r1 = *(u16*)(r10 - 16); \
+ r1 = *(u32*)(r10 - 32); \
+ r1 = *(u64*)(r10 - 64); \
+ /* read from a spill of a wrong size, it is a separate \
+ * branch in check_stack_read_fixed_off() \
+ */ \
+ *(u32*)(r10 - 72) = r1; \
+ r1 = *(u64*)(r10 - 72); \
+ r0 = 0; \
+ exit; \
+"
+ ::: __clobber_all);
+}
+
+/* Read an uninitialized value from stack at a variable offset */
+SEC("socket")
+__naked int read_uninit_stack_var_off(void *ctx)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ /* force stack depth to be 64 */ \
+ *(u64*)(r10 - 64) = r0; \
+ r0 = -r0; \
+ /* give r0 a range [-31, -1] */ \
+ if r0 s<= -32 goto exit_%=; \
+ if r0 s>= 0 goto exit_%=; \
+ /* access stack using r0 */ \
+ r1 = r10; \
+ r1 += r0; \
+ r2 = *(u8*)(r1 + 0); \
+exit_%=: r0 = 0; \
+ exit; \
+"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+static __noinline void dummy(void) {}
+
+/* Pass a pointer to uninitialized stack memory to a helper.
+ * Passed memory block should be marked as STACK_MISC after helper call.
+ */
+SEC("socket")
+__log_level(7) __msg("fp-104=mmmmmmmm")
+__naked int helper_uninit_to_misc(void *ctx)
+{
+ asm volatile (" \
+ /* force stack depth to be 128 */ \
+ *(u64*)(r10 - 128) = r1; \
+ r1 = r10; \
+ r1 += -128; \
+ r2 = 32; \
+ call %[bpf_trace_printk]; \
+ /* Call to dummy() forces print_verifier_state(..., true), \
+ * thus showing the stack state, matched by __msg(). \
+ */ \
+ call %[dummy]; \
+ r0 = 0; \
+ exit; \
+"
+ :
+ : __imm(bpf_trace_printk),
+ __imm(dummy)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
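For contrast, here is a minimal sketch (an editorial illustration, not part of the patch) of the accepted counterpart: the same fixed-offset read becomes legal once the slot has been written first, even for unprivileged loaders. It assumes the same SEC()/__naked annotations plus the __success marker from bpf_misc.h used above.

SEC("socket")
__success
__naked int read_init_stack_fixed_off(void *ctx)
{
	asm volatile ("					\
	r1 = 0;						\
	/* initialize fp-8 before reading it */		\
	*(u64*)(r10 - 8) = r1;				\
	r1 = *(u64*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"
	::: __clobber_all);
}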
* that fp-8 stack slot was unused in the fall-through
* branch and will accept the program incorrectly
*/
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_48b = { 6 },
- .errstr = "invalid indirect read from stack R2 off -8+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_48b = { 7 },
+ .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"calls: ctx read at start of subprog",
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+ /* set max stack size */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+ /* set r3 to a random value */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ /* use bitwise AND to limit r3 range to [0, 64] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ /* Call bpf_ringbuf_output(); it is one of the few helper functions with
+ * an ARG_CONST_SIZE_OR_ZERO parameter allowed for unprivileged programs.
+ * For unprivileged programs this should signal an error, because memory
+ * at &fp[-64] is not initialized.
+ */
+ BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_EXIT_INSN(),
},
- .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .fixup_map_ringbuf = { 4 },
+ .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+ /* set max stack size */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+ /* set r3 to a random value */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ /* use JMP to limit r3 range to [0, 64] */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ /* Call bpf_ringbuf_output(); it is one of the few helper functions with
+ * an ARG_CONST_SIZE_OR_ZERO parameter allowed for unprivileged programs.
+ * For unprivileged programs this should signal an error, because memory
+ * at &fp[-64] is not initialized.
+ */
+ BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .fixup_map_ringbuf = { 4 },
+ .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"helper access to variable memory: stack, JMP (signed), no min check",
{
"helper access to variable memory: 8 bytes leak",
.insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ /* set max stack size */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+ /* set r3 to a random value */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ /* Note: fp[-32] left uninitialized */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ /* Limit r3 range to [1, 64] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ /* Call bpf_ringbuf_output(); it is one of the few helper functions with
+ * an ARG_CONST_SIZE_OR_ZERO parameter allowed for unprivileged programs.
+ * For unprivileged programs this should signal an error, because the
+ * memory region [1, 64] at &fp[-64] is not fully initialized.
+ */
+ BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid indirect read from stack R1 off -64+32 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .fixup_map_ringbuf = { 3 },
+ .errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"helper access to variable memory: 8 bytes no leak (init memory)",
/* bpf_strtoul() */
BPF_EMIT_CALL(BPF_FUNC_strtoul),
- BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
- .errstr = "invalid indirect read from stack R4 off -16+4 size 8",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"ARG_PTR_TO_LONG misaligned",
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
- .errstr = "invalid read from stack off -16+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr_unpriv = "invalid read from stack off -16+0 size 8",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"precision tracking for u32 spill/fill",
BPF_EXIT_INSN(),
},
.flags = BPF_F_TEST_STATE_FREQ,
- .errstr = "invalid read from stack off -8+1 size 8",
- .result = REJECT,
+ .errstr_unpriv = "invalid read from stack off -8+1 size 8",
+ .result_unpriv = REJECT,
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
.result = ACCEPT,
},
{
- "sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_sk_storage_map = { 14 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "invalid indirect read from stack",
-},
-{
"bpf_map_lookup_elem(smap, &key)",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .result = REJECT,
- .errstr = "invalid read from stack off -4+0 size 4",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid read from stack off -4+0 size 4",
+ /* in privileged mode reads from uninitialized stack locations are permitted */
+ .result = ACCEPT,
},
{
"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
- "indirect variable-offset stack access, max_off+size > max_initialized",
- .insns = {
- /* Fill only the second from top 8 bytes of the stack. */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- /* Get an unknown value. */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned. */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
- /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
- * which. fp-12 size 8 is partially uninitialized stack.
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* Dereference it indirectly. */
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .errstr = "invalid indirect read from stack R2 var_off",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
-},
-{
"indirect variable-offset stack access, min_off < min_initialized",
.insns = {
/* Fill only the top 8 bytes of the stack. */
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
- "indirect variable-offset stack access, uninitialized",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_3, 28),
- /* Fill the top 16 bytes of the stack. */
- BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value. */
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned. */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
- /* Add it to fp. We now have either fp-12 or fp-16, we don't know
- * which, but either way it points to initialized stack.
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_5, 8),
- /* Dereference it indirectly. */
- BPF_EMIT_CALL(BPF_FUNC_getsockopt),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack R4 var_off",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
-},
-{
"indirect variable-offset stack access, ok",
.insns = {
/* Fill the top 16 bytes of the stack. */
dev_addr_lists.sh \
mode-1-recovery-updelay.sh \
mode-2-recovery-updelay.sh \
- option_prio.sh
+ option_prio.sh \
+ bond-eth-type-change.sh
TEST_FILES := \
lag_lib.sh \
--- /dev/null
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bond device ether type changing
+#
+
+ALL_TESTS="
+ bond_test_unsuccessful_enslave_type_change
+ bond_test_successful_enslave_type_change
+"
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+
+bond_check_flags()
+{
+ local bonddev=$1
+
+ ip -d l sh dev "$bonddev" | grep -q "MASTER"
+ check_err $? "MASTER flag is missing from the bond device"
+
+ ip -d l sh dev "$bonddev" | grep -q "SLAVE"
+ check_err $? "SLAVE flag is missing from the bond device"
+}
+
+# test enslaved bond dev type change from ARPHRD_ETHER and back
+# this allows us to test both MASTER and SLAVE flags at once
+bond_test_enslave_type_change()
+{
+ local test_success=$1
+ local devbond0="test-bond0"
+ local devbond1="test-bond1"
+ local devbond2="test-bond2"
+ local nonethdev="test-noneth0"
+
+ # create a non-ARPHRD_ETHER device for testing (e.g. nlmon type)
+ ip link add name "$nonethdev" type nlmon
+ check_err $? "could not create a non-ARPHRD_ETHER device (nlmon)"
+ ip link add name "$devbond0" type bond
+ if [ $test_success -eq 1 ]; then
+ # we need devbond0 in active-backup mode to successfully enslave nonethdev
+ ip link set dev "$devbond0" type bond mode active-backup
+ check_err $? "could not change bond mode to active-backup"
+ fi
+ ip link add name "$devbond1" type bond
+ ip link add name "$devbond2" type bond
+ ip link set dev "$devbond0" master "$devbond1"
+ check_err $? "could not enslave $devbond0 to $devbond1"
+ # change bond type to non-ARPHRD_ETHER
+ ip link set dev "$nonethdev" master "$devbond0" 1>/dev/null 2>/dev/null
+ ip link set dev "$nonethdev" nomaster 1>/dev/null 2>/dev/null
+ # restore ARPHRD_ETHER type by enslaving such device
+ ip link set dev "$devbond2" master "$devbond0"
+ check_err $? "could not enslave $devbond2 to $devbond0"
+ ip link set dev "$devbond1" nomaster
+
+ bond_check_flags "$devbond0"
+
+ # clean up
+ ip link del dev "$devbond0"
+ ip link del dev "$devbond1"
+ ip link del dev "$devbond2"
+ ip link del dev "$nonethdev"
+}
+
+bond_test_unsuccessful_enslave_type_change()
+{
+ RET=0
+
+ bond_test_enslave_type_change 0
+ log_test "Change ether type of an enslaved bond device with unsuccessful enslave"
+}
+
+bond_test_successful_enslave_type_change()
+{
+ RET=0
+
+ bond_test_enslave_type_change 1
+ log_test "Change ether type of an enslaved bond device with successful enslave"
+}
+
+tests_run
+
+exit "$EXIT_STATUS"
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
+TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
int nr_iter;
int timer_period_ms;
int migration_freq_ms;
+ struct kvm_arm_counter_offset offset;
};
static struct test_args test_args = {
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
+ .offset = { .reserved = 1 },
};
#define msecs_to_usecs(msec) ((msec) * 1000LL)
uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
unsigned long xctl = 0;
unsigned int timer_irq = 0;
+ unsigned int accessor;
- if (stage == GUEST_STAGE_VTIMER_CVAL ||
- stage == GUEST_STAGE_VTIMER_TVAL) {
- xctl = timer_get_ctl(VIRTUAL);
- timer_set_ctl(VIRTUAL, CTL_IMASK);
- xcnt = timer_get_cntct(VIRTUAL);
- cval = timer_get_cval(VIRTUAL);
+ if (intid == IAR_SPURIOUS)
+ return;
+
+ switch (stage) {
+ case GUEST_STAGE_VTIMER_CVAL:
+ case GUEST_STAGE_VTIMER_TVAL:
+ accessor = VIRTUAL;
timer_irq = vtimer_irq;
- } else if (stage == GUEST_STAGE_PTIMER_CVAL ||
- stage == GUEST_STAGE_PTIMER_TVAL) {
- xctl = timer_get_ctl(PHYSICAL);
- timer_set_ctl(PHYSICAL, CTL_IMASK);
- xcnt = timer_get_cntct(PHYSICAL);
- cval = timer_get_cval(PHYSICAL);
+ break;
+ case GUEST_STAGE_PTIMER_CVAL:
+ case GUEST_STAGE_PTIMER_TVAL:
+ accessor = PHYSICAL;
timer_irq = ptimer_irq;
- } else {
+ break;
+ default:
GUEST_ASSERT(0);
+ return;
}
+ xctl = timer_get_ctl(accessor);
+ if ((xctl & CTL_IMASK) || !(xctl & CTL_ENABLE))
+ return;
+
+ timer_set_ctl(accessor, CTL_IMASK);
+ xcnt = timer_get_cntct(accessor);
+ cval = timer_get_cval(accessor);
+
xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
/* Make sure we are dealing with the correct timer IRQ */
/* Basic 'timer condition met' check */
GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
+
+ WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
static void guest_irq_handler(struct ex_regs *regs)
guest_validate_irq(intid, shared_data);
- WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
-
gic_set_eoi(intid);
}
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
+ if (!test_args.offset.reserved) {
+ if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
+ vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
+ else
+ TEST_FAIL("no support for global offset\n");
+ }
+
for (i = 0; i < nr_vcpus; i++)
vcpu_init_descriptor_tables(vcpus[i]);
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
+ pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
pr_info("\t-h: print this help screen\n");
}
{
int opt;
- while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) {
+ while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
+ case 'o':
+ test_args.offset.counter_offset = strtol(optarg, NULL, 0);
+ test_args.offset.reserved = 0;
+ break;
case 'h':
default:
goto err;
* The current blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
*
- * The blessed list is up to date with kernel version v5.13-rc3
+ * The blessed list is up to date with kernel version v6.4 (or so we hope)
*/
static __u64 base_regs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
ARM64_SYS_REG(3, 0, 0, 3, 7),
ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 2),
+ ARM64_SYS_REG(3, 0, 0, 4, 2), /* ID_AA64PFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 3),
ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 5),
+ ARM64_SYS_REG(3, 0, 0, 4, 5), /* ID_AA64SMFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 6),
ARM64_SYS_REG(3, 0, 0, 4, 7),
ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 5, 7),
ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 6, 2),
+ ARM64_SYS_REG(3, 0, 0, 6, 2), /* ID_AA64ISAR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 6, 3),
ARM64_SYS_REG(3, 0, 0, 6, 4),
ARM64_SYS_REG(3, 0, 0, 6, 5),
ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3),
- ARM64_SYS_REG(3, 0, 0, 7, 4),
+ ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
+ ARM64_SYS_REG(3, 0, 0, 7, 4), /* ID_AA64MMFR4_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 5),
ARM64_SYS_REG(3, 0, 0, 7, 6),
ARM64_SYS_REG(3, 0, 0, 7, 7),
ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
+ ARM64_SYS_REG(3, 3, 14, 0, 1), /* CNTPCT_EL0 */
+ ARM64_SYS_REG(3, 3, 14, 2, 1), /* CNTP_CTL_EL0 */
+ ARM64_SYS_REG(3, 3, 14, 2, 2), /* CNTP_CVAL_EL0 */
ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * smccc_filter - Tests for the SMCCC filter UAPI.
+ *
+ * Copyright (c) 2023 Google LLC
+ *
+ * This test includes:
+ * - Tests that the UAPI constraints are upheld by KVM. For example, userspace
+ * is prevented from filtering the architecture range of SMCCC calls.
+ * - Tests that the filter actions (DENY, FWD_TO_USER) work as intended.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
+#include <stdint.h>
+
+#include "processor.h"
+#include "test_util.h"
+
+enum smccc_conduit {
+ HVC_INSN,
+ SMC_INSN,
+};
+
+#define for_each_conduit(conduit) \
+ for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++)
+
+static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
+{
+ struct arm_smccc_res res;
+
+ if (conduit == SMC_INSN)
+ smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ else
+ smccc_hvc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ GUEST_SYNC(res.a0);
+}
+
+static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+ enum kvm_smccc_filter_action action)
+{
+ struct kvm_smccc_filter filter = {
+ .base = start,
+ .nr_functions = nr_functions,
+ .action = action,
+ };
+
+ return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
+ KVM_ARM_VM_SMCCC_FILTER, &filter);
+}
+
+static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+ enum kvm_smccc_filter_action action)
+{
+ int ret = __set_smccc_filter(vm, start, nr_functions, action);
+
+ TEST_ASSERT(!ret, "failed to configure SMCCC filter: %d", ret);
+}
+
+static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
+{
+ struct kvm_vcpu_init init;
+ struct kvm_vm *vm;
+
+ vm = vm_create(1);
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
+
+ /*
+ * Enable in-kernel emulation of PSCI to ensure that calls are denied
+ * due to the SMCCC filter, not because of KVM.
+ */
+ init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
+
+ *vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
+ return vm;
+}
+
+static void test_pad_must_be_zero(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ struct kvm_smccc_filter filter = {
+ .base = PSCI_0_2_FN_PSCI_VERSION,
+ .nr_functions = 1,
+ .action = KVM_SMCCC_FILTER_DENY,
+ .pad = { -1 },
+ };
+ int r;
+
+ r = __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
+ KVM_ARM_VM_SMCCC_FILTER, &filter);
+ TEST_ASSERT(r < 0 && errno == EINVAL,
+ "Setting filter with nonzero padding should return EINVAL");
+}
+
+/* Ensure that userspace cannot filter the Arm Architecture SMCCC range */
+static void test_filter_reserved_range(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ uint32_t smc64_fn;
+ int r;
+
+ r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
+ 1, KVM_SMCCC_FILTER_DENY);
+ TEST_ASSERT(r < 0 && errno == EEXIST,
+ "Attempt to filter reserved range should return EEXIST");
+
+ smc64_fn = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
+ 0, 0);
+
+ r = __set_smccc_filter(vm, smc64_fn, 1, KVM_SMCCC_FILTER_DENY);
+ TEST_ASSERT(r < 0 && errno == EEXIST,
+ "Attempt to filter reserved range should return EEXIST");
+
+ kvm_vm_free(vm);
+}
+
+static void test_invalid_nr_functions(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ int r;
+
+ r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 0, KVM_SMCCC_FILTER_DENY);
+ TEST_ASSERT(r < 0 && errno == EINVAL,
+ "Attempt to filter 0 functions should return EINVAL");
+
+ kvm_vm_free(vm);
+}
+
+static void test_overflow_nr_functions(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ int r;
+
+ r = __set_smccc_filter(vm, ~0, ~0, KVM_SMCCC_FILTER_DENY);
+ TEST_ASSERT(r < 0 && errno == EINVAL,
+ "Attempt to overflow filter range should return EINVAL");
+
+ kvm_vm_free(vm);
+}
+
+static void test_reserved_action(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ int r;
+
+ r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, -1);
+ TEST_ASSERT(r < 0 && errno == EINVAL,
+ "Attempt to use reserved filter action should return EINVAL");
+
+ kvm_vm_free(vm);
+}
+
+/* Test that overlapping configurations of the SMCCC filter are rejected */
+static void test_filter_overlap(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = setup_vm(&vcpu);
+ int r;
+
+ set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
+
+ r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
+ TEST_ASSERT(r < 0 && errno == EEXIST,
+ "Attempt to filter already configured range should return EEXIST");
+
+ kvm_vm_free(vm);
+}
+
+static void expect_call_denied(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ if (get_ucall(vcpu, &uc) != UCALL_SYNC)
+ TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
+
+ TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
+ "Unexpected SMCCC return code: %lu", uc.args[1]);
+}
+
+/* Denied SMCCC calls have a return code of SMCCC_RET_NOT_SUPPORTED */
+static void test_filter_denied(void)
+{
+ enum smccc_conduit conduit;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ for_each_conduit(conduit) {
+ vm = setup_vm(&vcpu);
+
+ set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_DENY);
+ vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
+
+ vcpu_run(vcpu);
+ expect_call_denied(vcpu);
+
+ kvm_vm_free(vm);
+ }
+}
+
+static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
+ enum smccc_conduit conduit)
+{
+ struct kvm_run *run = vcpu->run;
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERCALL,
+ "Unexpected exit reason: %u", run->exit_reason);
+ TEST_ASSERT(run->hypercall.nr == func_id,
+ "Unexpected SMCCC function: %llu", run->hypercall.nr);
+
+ if (conduit == SMC_INSN)
+ TEST_ASSERT(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC,
+ "KVM_HYPERCALL_EXIT_SMC is not set");
+ else
+ TEST_ASSERT(!(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC),
+ "KVM_HYPERCALL_EXIT_SMC is set");
+}
+
+/* SMCCC calls forwarded to userspace cause KVM_EXIT_HYPERCALL exits */
+static void test_filter_fwd_to_user(void)
+{
+ enum smccc_conduit conduit;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ for_each_conduit(conduit) {
+ vm = setup_vm(&vcpu);
+
+ set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_FWD_TO_USER);
+ vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
+
+ vcpu_run(vcpu);
+ expect_call_fwd_to_user(vcpu, PSCI_0_2_FN_PSCI_VERSION, conduit);
+
+ kvm_vm_free(vm);
+ }
+}
+
+static bool kvm_supports_smccc_filter(void)
+{
+ struct kvm_vm *vm = vm_create_barebones();
+ int r;
+
+ r = __kvm_has_device_attr(vm->fd, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER);
+
+ kvm_vm_free(vm);
+ return !r;
+}
+
+int main(void)
+{
+ TEST_REQUIRE(kvm_supports_smccc_filter());
+
+ test_pad_must_be_zero();
+ test_invalid_nr_functions();
+ test_overflow_nr_functions();
+ test_reserved_action();
+ test_filter_reserved_range();
+ test_filter_overlap();
+ test_filter_denied();
+ test_filter_fwd_to_user();
+}
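For context, calls the filter forwards to userspace must be completed by the VMM before the vCPU is resumed. The fragment below is a hypothetical VMM-side sketch built from the same selftest helpers used above; the convention of writing the SMCCC result back to the guest's x0 is taken from the KVM_EXIT_HYPERCALL documentation and is an assumption here, not code from this patch.

/* Editorial sketch, not part of the patch: deny a forwarded call. */
static void deny_forwarded_call(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERCALL,
		    "Unexpected exit reason: %u", run->exit_reason);

	/* hand SMCCC_RET_NOT_SUPPORTED back to the guest in x0 (assumed
	 * completion convention, per the KVM_EXIT_HYPERCALL docs)
	 */
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[0]),
		     SMCCC_RET_NOT_SUPPORTED);
	vcpu_run(vcpu);
}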
CONFIG_KVM_INTEL=y
CONFIG_KVM_AMD=y
CONFIG_USERFAULTFD=y
+CONFIG_IDLE_PAGE_TRACKING=y
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res);
+/**
+ * smccc_smc - Invoke an SMCCC function using the smc conduit
+ * @function_id: the SMCCC function to be called
+ * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
+ * @res: pointer to write the return values from registers x0-x3
+ */
+void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+ uint64_t arg6, struct arm_smccc_res *res);
+
uint32_t guest_get_vcpuid(void);
#endif /* SELFTEST_KVM_PROCESSOR_H */
return (gva >> vm->page_shift) & mask;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
- uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
- return entry & mask;
+ uint64_t pte;
+
+ pte = pa & GENMASK(47, vm->page_shift);
+ if (vm->page_shift == 16)
+ pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
+ pte |= attrs;
+
+ return pte;
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
+{
+ uint64_t pa;
+
+ pa = pte & GENMASK(47, vm->page_shift);
+ if (vm->page_shift == 16)
+ pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
+
+ return pa;
}
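A short worked example may help here (editorial, assuming it sits beside the helpers above): with 64K pages (vm->page_shift == 16), physical-address bits [51:48] are carried in descriptor bits [15:12], so a greater-than-48-bit address survives the encode/decode round trip.

/* Editorial example, not in the patch. */
static void lpa_roundtrip_example(struct kvm_vm *vm)
{
	uint64_t pa = 0x000f000012340000ULL;	/* PA[51:48] = 0xf */
	uint64_t pte = addr_pte(vm, pa, 3);	/* valid + table bits */

	/* bits [51:48] moved into pte[15:12] and back */
	TEST_ASSERT(pte_addr(vm, pte) == pa, "PA did not round-trip");
}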
static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = vm_alloc_page_table(vm) | 3;
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = vm_alloc_page_table(vm) | 3;
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
if (!*ptep)
- *ptep = vm_alloc_page_table(vm) | 3;
+ *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
TEST_FAIL("Page table levels must be 2, 3, or 4");
}
- *ptep = paddr | 3;
- *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
+ *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AF */
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
- uint64_t sctlr_el1, tcr_el1;
+ uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
if (!init)
init = &default_init;
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
+ ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);
+
/* Configure output size */
switch (vm->mode) {
case VM_MODE_P52V48_64K:
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P48V48_16K:
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}
close(kvm_fd);
}
+#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5, \
+ arg6, res) \
+ asm volatile("mov w0, %w[function_id]\n" \
+ "mov x1, %[arg0]\n" \
+ "mov x2, %[arg1]\n" \
+ "mov x3, %[arg2]\n" \
+ "mov x4, %[arg3]\n" \
+ "mov x5, %[arg4]\n" \
+ "mov x6, %[arg5]\n" \
+ "mov x7, %[arg6]\n" \
+ #insn "#0\n" \
+ "mov %[res0], x0\n" \
+ "mov %[res1], x1\n" \
+ "mov %[res2], x2\n" \
+ "mov %[res3], x3\n" \
+ : [res0] "=r"(res->a0), [res1] "=r"(res->a1), \
+ [res2] "=r"(res->a2), [res3] "=r"(res->a3) \
+ : [function_id] "r"(function_id), [arg0] "r"(arg0), \
+ [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3), \
+ [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6) \
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")
+
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res)
{
- asm volatile("mov w0, %w[function_id]\n"
- "mov x1, %[arg0]\n"
- "mov x2, %[arg1]\n"
- "mov x3, %[arg2]\n"
- "mov x4, %[arg3]\n"
- "mov x5, %[arg4]\n"
- "mov x6, %[arg5]\n"
- "mov x7, %[arg6]\n"
- "hvc #0\n"
- "mov %[res0], x0\n"
- "mov %[res1], x1\n"
- "mov %[res2], x2\n"
- "mov %[res3], x3\n"
- : [res0] "=r"(res->a0), [res1] "=r"(res->a1),
- [res2] "=r"(res->a2), [res3] "=r"(res->a3)
- : [function_id] "r"(function_id), [arg0] "r"(arg0),
- [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),
- [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
- : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
+ __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
+ arg6, res);
+}
+
+void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+ uint64_t arg6, struct arm_smccc_res *res)
+{
+ __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
+ arg6, res);
}
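As a usage note (editorial; this mirrors guest_main() in the smccc_filter test above rather than adding anything new), either conduit is a one-liner from guest code:

/* Editorial usage sketch: query the PSCI version over the hvc conduit;
 * PSCI_0_2_FN_PSCI_VERSION comes from <linux/psci.h>.
 */
static void guest_query_psci_version(void)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED);
}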
void kvm_selftest_arch_init(void)
CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+CLANG_TARGET_FLAGS_i386 := i386-linux-gnu
CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
ifeq ($(CROSS_COMPILE),)
TEST_F(mdwe, mmap_FIXED)
{
- void *p, *p2;
+ void *p;
- p2 = mmap(NULL, self->size, PROT_READ | PROT_EXEC, self->flags, 0, 0);
self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
ASSERT_NE(self->p, MAP_FAILED);
# SPDX-License-Identifier: GPL-2.0-only
bind_bhash
bind_timewait
+bind_wildcard
csum
cmsg_sender
diag_uid
TEST_GEN_FILES += csum
TEST_GEN_FILES += nat6to4.o
TEST_GEN_FILES += ip_local_port_range
+TEST_GEN_FILES += bind_wildcard
TEST_FILES := settings
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "../kselftest_harness.h"
+
+FIXTURE(bind_wildcard)
+{
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ int expected_errno;
+};
+
+FIXTURE_VARIANT(bind_wildcard)
+{
+ const __u32 addr4_const;
+ const struct in6_addr *addr6_const;
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_loopback,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_loopback,
+};
+
+FIXTURE_SETUP(bind_wildcard)
+{
+ self->addr4.sin_family = AF_INET;
+ self->addr4.sin_port = htons(0);
+ self->addr4.sin_addr.s_addr = htonl(variant->addr4_const);
+
+ self->addr6.sin6_family = AF_INET6;
+ self->addr6.sin6_port = htons(0);
+ self->addr6.sin6_addr = *variant->addr6_const;
+
+ if (variant->addr6_const == &in6addr_any)
+ self->expected_errno = EADDRINUSE;
+ else
+ self->expected_errno = 0;
+}
+
+FIXTURE_TEARDOWN(bind_wildcard)
+{
+}
+
+void bind_sockets(struct __test_metadata *_metadata,
+ FIXTURE_DATA(bind_wildcard) *self,
+ struct sockaddr *addr1, socklen_t addrlen1,
+ struct sockaddr *addr2, socklen_t addrlen2)
+{
+ int fd[2];
+ int ret;
+
+ fd[0] = socket(addr1->sa_family, SOCK_STREAM, 0);
+ ASSERT_GT(fd[0], 0);
+
+ ret = bind(fd[0], addr1, addrlen1);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(fd[0], addr1, &addrlen1);
+ ASSERT_EQ(ret, 0);
+
+ ((struct sockaddr_in *)addr2)->sin_port = ((struct sockaddr_in *)addr1)->sin_port;
+
+ fd[1] = socket(addr2->sa_family, SOCK_STREAM, 0);
+ ASSERT_GT(fd[1], 0);
+
+ ret = bind(fd[1], addr2, addrlen2);
+ if (self->expected_errno) {
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, self->expected_errno);
+ } else {
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(fd[1]);
+ close(fd[0]);
+}
+
+TEST_F(bind_wildcard, v4_v6)
+{
+ bind_sockets(_metadata, self,
+ (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+ (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+}
+
+TEST_F(bind_wildcard, v6_v4)
+{
+ bind_sockets(_metadata, self,
+ (struct sockaddr *)&self->addr6, sizeof(self->addr6),
+ (struct sockaddr *)&self->addr4, sizeof(self->addr4));
+}
+
+TEST_HARNESS_MAIN
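One note on the expected_errno logic above: with the default net.ipv6.bindv6only=0, an AF_INET6 wildcard bind also claims the IPv4 port space, which is why only the in6addr_any variants expect EADDRINUSE. The fragment below is an editorial sketch, not exercised by this test, of the socket option that lifts the conflict (it assumes <unistd.h> for close() in addition to the headers above).

/* Editorial sketch: a v6-only wildcard no longer covers v4 binds. */
static int v6only_wildcard_socket(void)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* a bind() to in6addr_any now conflicts only with v6 */
}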
assert stderr == ""
ports = json.loads(stdout)['port']
+ validate_devlink_output(ports, 'flavour')
+
for port in ports:
if dev in port:
if ports[port]['flavour'] == 'physical':
unsplit(port.bus_info)
+def validate_devlink_output(devlink_data, target_property=None):
+ """
+ Determine if the test should be skipped by checking that:
+ 1. devlink_data contains values
+ 2. The target_property exists in devlink_data
+ """
+ skip_reason = None
+ if any(devlink_data.values()):
+ if target_property:
+ skip_reason = "{} not found in devlink output, test skipped".format(target_property)
+ for key in devlink_data:
+ if target_property in devlink_data[key]:
+ skip_reason = None
+ else:
+ skip_reason = 'devlink output is empty, test skipped'
+
+ if skip_reason:
+ print(skip_reason)
+ sys.exit(KSFT_SKIP)
+
+
def make_parser():
parser = argparse.ArgumentParser(description='A test for port splitting.')
parser.add_argument('--dev',
stdout, stderr = run_command(cmd)
assert stderr == ""
+ validate_devlink_output(json.loads(stdout))
devs = json.loads(stdout)['dev']
- if devs:
- dev = list(devs.keys())[0]
- else:
- print("no devlink device was found, test skipped")
- sys.exit(KSFT_SKIP)
+ dev = list(devs.keys())[0]
cmd = "devlink dev show %s" % dev
stdout, stderr = run_command(cmd)
ports = devlink_ports(dev)
+ found_max_lanes = False
for port in ports.if_names:
max_lanes = get_max_lanes(port.name)
split_splittable_port(port, lane, max_lanes, dev)
lane //= 2
+ found_max_lanes = True
+
+ if not found_max_lanes:
+ print(f"Test not started, no port of device {dev} reports max_lanes")
+ sys.exit(KSFT_SKIP)
if __name__ == "__main__":
fi
stdbuf -o0 -e0 printf "\tExpected value for '%s': '%s', got '%s'.\n" \
- "${var}" "${!var}" "${!exp}"
+ "${var}" "${!exp}" "${!var}"
return 1
}
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
+#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
+#include <sys/uio.h>
#include "../kselftest.h" /* For __cpuid_count() */
_exit(0);
}
+static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
+{
+ return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
+ &xbuf2->bytes[xtiledata.xbuf_offset],
+ xtiledata.size);
+}
+
/*
* Save current register state and compare it to @xbuf1.
*
fatal_error("failed to allocate XSAVE buffer\n");
xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
- ret = memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
- &xbuf2->bytes[xtiledata.xbuf_offset],
- xtiledata.size);
+ ret = __compare_tiledata_state(xbuf1, xbuf2);
free(xbuf2);
free(finfo);
}
+/* Ptrace test */
+
+/*
+ * Make sure the ptracee has the expanded kernel buffer on the first
+ * use. Then, initialize the state before performing the state
+ * injection from the ptracer.
+ */
+static inline void ptracee_firstuse_tiledata(void)
+{
+ load_rand_tiledata(stashed_xsave);
+ init_xtiledata();
+}
+
+/*
+ * Ptracer injects the randomized tile data state. It also reads
+ * before and after that, which will execute the kernel's state copy
+ * functions. So, the tester is advised to double-check any emitted
+ * kernel messages.
+ */
+static void ptracer_inject_tiledata(pid_t target)
+{
+ struct xsave_buffer *xbuf;
+ struct iovec iov;
+
+ xbuf = alloc_xbuf();
+ if (!xbuf)
+ fatal_error("unable to allocate XSAVE buffer");
+
+ printf("\tRead the init'ed tiledata via ptrace().\n");
+
+ iov.iov_base = xbuf;
+ iov.iov_len = xbuf_size;
+
+ memset(stashed_xsave, 0, xbuf_size);
+
+ if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ fatal_error("PTRACE_GETREGSET");
+
+ if (!__compare_tiledata_state(stashed_xsave, xbuf))
+ printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
+ else
+ printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
+
+ printf("\tInject tiledata via ptrace().\n");
+
+ load_rand_tiledata(xbuf);
+
+ memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
+ &xbuf->bytes[xtiledata.xbuf_offset],
+ xtiledata.size);
+
+ if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ fatal_error("PTRACE_SETREGSET");
+
+ if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ fatal_error("PTRACE_GETREGSET");
+
+ if (!__compare_tiledata_state(stashed_xsave, xbuf))
+ printf("[OK]\tTiledata was correctly written to ptracee.\n");
+ else
+ printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
+}
+
+static void test_ptrace(void)
+{
+ pid_t child;
+ int status;
+
+ child = fork();
+ if (child < 0) {
+ err(1, "fork");
+ } else if (!child) {
+ if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
+ err(1, "PTRACE_TRACEME");
+
+ ptracee_firstuse_tiledata();
+
+ raise(SIGTRAP);
+ _exit(0);
+ }
+
+ do {
+ wait(&status);
+ } while (WSTOPSIG(status) != SIGTRAP);
+
+ ptracer_inject_tiledata(child);
+
+ ptrace(PTRACE_DETACH, child, NULL, NULL);
+ wait(&status);
+ if (!WIFEXITED(status) || WEXITSTATUS(status))
+ err(1, "ptrace test");
+}
+
int main(void)
{
/* Check hardware availability at first */
ctxtswtest_config.num_threads = 5;
test_context_switch();
+ test_ptrace();
+
clearhandler(SIGILL);
free_stashed_xsave();
close(fd);
}
+#define INV_BUF_TEST_DATA_LEN 512
+
+static void test_inv_buf_client(const struct test_opts *opts, bool stream)
+{
+ unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+ ssize_t ret;
+ int fd;
+
+ if (stream)
+ fd = vsock_stream_connect(opts->peer_cid, 1234);
+ else
+ fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("SENDDONE");
+
+ /* Use invalid buffer here. */
+ ret = recv(fd, NULL, sizeof(data), 0);
+ if (ret != -1) {
+ fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+ exit(EXIT_FAILURE);
+ }
+
+ if (errno != ENOMEM) {
+ fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ ret = recv(fd, data, sizeof(data), MSG_DONTWAIT);
+
+ if (stream) {
+ /* For SOCK_STREAM we must continue reading. */
+ if (ret != sizeof(data)) {
+ fprintf(stderr, "expected recv(2) success, got %zi\n", ret);
+ exit(EXIT_FAILURE);
+ }
+ /* Don't check errno in case of success. */
+ } else {
+ /* For SOCK_SEQPACKET, the socket's queue must be empty. */
+ if (ret != -1) {
+ fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+ exit(EXIT_FAILURE);
+ }
+
+ if (errno != EAGAIN) {
+ fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ control_writeln("DONE");
+
+ close(fd);
+}
+
+static void test_inv_buf_server(const struct test_opts *opts, bool stream)
+{
+ unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+ ssize_t res;
+ int fd;
+
+ if (stream)
+ fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ else
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ res = send(fd, data, sizeof(data), 0);
+ if (res != sizeof(data)) {
+ fprintf(stderr, "unexpected send(2) result %zi\n", res);
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln("SENDDONE");
+
+ control_expectln("DONE");
+
+ close(fd);
+}
+
+static void test_stream_inv_buf_client(const struct test_opts *opts)
+{
+ test_inv_buf_client(opts, true);
+}
+
+static void test_stream_inv_buf_server(const struct test_opts *opts)
+{
+ test_inv_buf_server(opts, true);
+}
+
+static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
+{
+ test_inv_buf_client(opts, false);
+}
+
+static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
+{
+ test_inv_buf_server(opts, false);
+}
+
static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM connection reset",
.run_client = test_seqpacket_bigmsg_client,
.run_server = test_seqpacket_bigmsg_server,
},
+ {
+ .name = "SOCK_STREAM test invalid buffer",
+ .run_client = test_stream_inv_buf_client,
+ .run_server = test_stream_inv_buf_server,
+ },
+ {
+ .name = "SOCK_SEQPACKET test invalid buffer",
+ .run_client = test_seqpacket_inv_buf_client,
+ .run_server = test_seqpacket_inv_buf_server,
+ },
{},
};
*.d
virtio_test
vringh_test
+virtio-trace/trace-agent
* At this point, pending calls to invalidate_range_start()
* have completed but no more MMU notifiers will run, so
* mn_active_invalidate_count may remain unbalanced.
- * No threads can be waiting in install_new_memslots as the
+ * No threads can be waiting in kvm_swap_active_memslots() as the
* last reference on KVM has been dropped, but freeing
* memslots would deadlock without this manual intervention.
*/
kvm_arch_flush_shadow_memslot(kvm, old);
kvm_arch_guest_memory_reclaimed(kvm);
- /* Was released by kvm_swap_active_memslots, reacquire. */
+ /* Was released by kvm_swap_active_memslots(), reacquire. */
mutex_lock(&kvm->slots_arch_lock);
/*
* Copy the arch-specific field of the newly-installed slot back to the
* old slot as the arch data could have changed between releasing
- * slots_arch_lock in install_new_memslots() and re-acquiring the lock
+ * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
* above. Writers are required to retrieve memslots *after* acquiring
* slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
*/
int r;
/*
- * Released in kvm_swap_active_memslots.
+ * Released in kvm_swap_active_memslots().
*
- * Must be held from before the current memslots are copied until
- * after the new memslots are installed with rcu_assign_pointer,
- * then released before the synchronize srcu in kvm_swap_active_memslots.
+ * Must be held from before the current memslots are copied until after
+ * the new memslots are installed with rcu_assign_pointer, then
+ * released before the synchronize srcu in kvm_swap_active_memslots().
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ struct kvm_vcpu *vcpu = data;
*val = pid_nr(rcu_access_pointer(vcpu->pid));
return 0;
}
const char *fmt)
{
int ret;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
/*
* The debugfs files are a reference to the kvm struct which
static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
simple_attr_release(inode, file);
kvm_put_kvm(stat_data->kvm);
static int kvm_stat_data_get(void *data, u64 *val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
switch (stat_data->kind) {
case KVM_STAT_VM:
static int kvm_stat_data_clear(void *data, u64 val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
if (val)
return -EINVAL;