memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
+ memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel
+ memory hard limit. Kernel hard limit is not
+ supported since 5.16. Writing any value to
+ the file will not have any effect, same as if
+ the nokmem kernel parameter was specified.
+ Kernel memory is still charged and reported
+ by memory.kmem.usage_in_bytes.
memory.kmem.usage_in_bytes show current kernel memory allocation
memory.kmem.failcnt show the number of kernel memory usage
hits limits
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | SME | [27-24] | y |
+ +------------------------------+---------+---------+
| MTE | [11-8] | y |
+------------------------------+---------+---------+
| SSBS | [7-4] | y |
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | CSSC | [55-52] | y |
+ +------------------------------+---------+---------+
+ | RPRFM | [51-48] | y |
+ +------------------------------+---------+---------+
+ | BC | [23-20] | y |
+ +------------------------------+---------+---------+
| MOPS | [19-16] | y |
+------------------------------+---------+---------+
+ | APA3 | [15-12] | y |
+ +------------------------------+---------+---------+
+ | GPA3 | [11-8] | y |
+ +------------------------------+---------+---------+
| RPRES | [7-4] | y |
+------------------------------+---------+---------+
| WFXT | [3-0] | y |
HWCAP2_MOPS
Functionality implied by ID_AA64ISAR2_EL1.MOPS == 0b0001.
+HWCAP2_HBC
+ Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.
+
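+As a rough userspace illustration (not part of the ABI description; it only
+assumes that the libc provides getauxval() and that <asm/hwcap.h> exports the
+HWCAP2_* constants listed above), a program can test for one of these
+capabilities like this::
+
+	#include <stdio.h>
+	#include <sys/auxv.h>
+	#include <asm/hwcap.h>
+
+	int main(void)
+	{
+		/* AT_HWCAP2 carries the HWCAP2_* bits documented above */
+		unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+		printf("HBC: %s\n", (hwcap2 & HWCAP2_HBC) ? "present" : "absent");
+		return 0;
+	}
+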
4. Unused AT_HWCAP bits
-----------------------
Documentation of LoongArch ELF psABI:
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (in Chinese)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (in Chinese)
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (in English)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (in English)
Linux kernel repository of Loongson and LoongArch:
ID number 0 and the slave drive will have ID number 1. The PATA port
nodes will be named "ide-port".
type: object
+ additionalProperties: false
properties:
reg:
maxItems: 1
'#clock-cells':
+ description:
+ The index in the assigned-clocks is mapped to the output clock as below
+ 0 - REF, 1 - SE1, 2 - SE2, 3 - SE3, 4 - DIFF1, 5 - DIFF2.
const: 1
clocks:
reg = <0x68>;
#clock-cells = <1>;
- clocks = <&x1_x2>;
+ clocks = <&x1>;
renesas,settings = [
80 00 11 19 4c 02 23 7f 83 19 08 a9 5f 25 24 bf
assigned-clocks = <&versa3 0>, <&versa3 1>,
<&versa3 2>, <&versa3 3>,
<&versa3 4>, <&versa3 5>;
- assigned-clock-rates = <12288000>, <25000000>,
- <12000000>, <11289600>,
- <11289600>, <24000000>;
+ assigned-clock-rates = <24000000>, <11289600>,
+ <11289600>, <12000000>,
+ <25000000>, <12288000>;
};
};
maintainers:
- Shawn Guo <shawnguo@kernel.org>
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
properties:
compatible:
enum:
- dmas
- dma-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
properties:
compatible:
- items:
- - enum:
- - loongson,ls2k0500-pmc
- - loongson,ls2k1000-pmc
- - const: syscon
+ oneOf:
+ - items:
+ - const: loongson,ls2k0500-pmc
+ - const: syscon
+ - items:
+ - enum:
+ - loongson,ls2k1000-pmc
+ - loongson,ls2k2000-pmc
+ - const: loongson,ls2k0500-pmc
+ - const: syscon
reg:
maxItems: 1
addition, the PM needs it to indicate whether the current SoC supports
Suspend To RAM.
+ syscon-poweroff:
+ $ref: /schemas/power/reset/syscon-poweroff.yaml#
+ type: object
+ description:
+ Node for power off method
+
+ syscon-reboot:
+ $ref: /schemas/power/reset/syscon-reboot.yaml#
+ type: object
+ description:
+ Node for reboot method
+
required:
- compatible
- reg
#include <dt-bindings/interrupt-controller/irq.h>
power-management@1fe27000 {
- compatible = "loongson,ls2k1000-pmc", "syscon";
+ compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x1fe27000 0x58>;
interrupt-parent = <&liointc1>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
loongson,suspend-address = <0x0 0x1c000500>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ offset = <0x30>;
+ mask = <0x1>;
+ };
+
+ syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmc>;
+ offset = <0x14>;
+ mask = <0x3c00>;
+ value = <0x3c00>;
+ };
};
- const: fsl,imx51-ecspi
- const: fsl,imx53-ecspi
- items:
+ - enum:
+ - fsl,imx25-cspi
+ - fsl,imx50-cspi
+ - fsl,imx51-cspi
+ - fsl,imx53-cspi
+ - const: fsl,imx35-cspi
+ - items:
- const: fsl,imx8mp-ecspi
- const: fsl,imx6ul-ecspi
- items:
https://btrfs.readthedocs.io
- https://btrfs.wiki.kernel.org
that maintains information about administration tasks, frequently asked
questions, use cases, mount options, comprehensible changelogs, features,
depend on the mmap_lock being held, but out of tree users should verify
for themselves. If they do need it, they can return VM_FAULT_RETRY to
be called with the mmap_lock held.
+
+---
+
+**mandatory**
+
+The order of opening block devices and matching or creating superblocks has
+changed.
+
+The old logic opened block devices first and then tried to find a
+suitable superblock to reuse based on the block device pointer.
+
+The new logic tries to find a suitable superblock first based on the device
+number, and opens the block device afterwards.
+
+Since opening block devices cannot happen under s_umount because of lock
+ordering requirements, s_umount is now dropped while opening block devices and
+reacquired before calling fill_super().
+
+In the old logic concurrent mounters would find the superblock on the list of
+superblocks for the filesystem type. Since the first opener of the block device
+would hold s_umount, they would wait until the superblock either became born or
+was discarded due to initialization failure.
+
+Since the new logic drops s_umount, concurrent mounters could grab s_umount and
+would spin. Instead they are now made to wait using an explicit wait-wake
+mechanism without having to hold s_umount.
+
+---
+
+**mandatory**
+
+The holder of a block device is now the superblock.
+
+The holder of a block device used to be the file_system_type which wasn't
+particularly useful. It wasn't possible to go from block device to owning
+superblock without matching on the device pointer stored in the superblock.
+This mechanism would only work for a single device so the block layer couldn't
+find the owning superblock of any additional devices.
+
+In the old mechanism reusing or creating a superblock for a racing mount(2) and
+umount(2) relied on the file_system_type as the holder. This was severely
+underdocumented, however:
+
+(1) Any concurrent mounter that managed to grab an active reference on an
+ existing superblock was made to wait until the superblock either became
+ ready or until the superblock was removed from the list of superblocks of
+ the filesystem type. If the superblock was ready the caller would simply
+ reuse it.
+
+(2) If the mounter came after deactivate_locked_super() but before
+ the superblock had been removed from the list of superblocks of the
+ filesystem type the mounter would wait until the superblock was shut down,
+ reuse the block device and allocate a new superblock.
+
+(3) If the mounter came after deactivate_locked_super() and after
+ the superblock had been removed from the list of superblocks of the
+ filesystem type the mounter would reuse the block device and allocate a new
+ superblock (the bd_holder pointer may still be set to the filesystem type).
+
+Because the holder of the block device was the file_system_type any concurrent
+mounter could open the block devices of any superblock of the same
+file_system_type without risking seeing EBUSY because the block device was
+still in use by another superblock.
+
+Making the superblock the owner of the block device changes this as the holder
+is now a unique superblock and thus block devices associated with it cannot be
+reused by concurrent mounters. So a concurrent mounter in (2) could suddenly
+see EBUSY when trying to open a block device whose holder was a different
+superblock.
+
+The new logic thus waits until the superblock and the devices are shut down in
+->kill_sb(). Removal of the superblock from the list of superblocks of the
+filesystem type is now moved to a later point when the devices are closed:
+
+(1) Any concurrent mounter managing to grab an active reference on an existing
+ superblock is made to wait until the superblock is either ready or until
+ the superblock and all devices are shut down in ->kill_sb(). If the
+ superblock is ready the caller will simply reuse it.
+
+(2) If the mounter comes after deactivate_locked_super() but before
+ the superblock has been removed from the list of superblocks of the
+ filesystem type the mounter is made to wait until the superblock and the
+ devices are shut down in ->kill_sb() and the superblock is removed from the
+ list of superblocks of the filesystem type. The mounter will allocate a new
+ superblock and grab ownership of the block device (the bd_holder pointer of
+ the block device will be set to the newly allocated superblock).
+
+(3) This case is now collapsed into (2) as the superblock is left on the list
+ of superblocks of the filesystem type until all devices are shut down in
+ ->kill_sb(). In other words, if the superblock isn't on the list of
+ superblocks of the filesystem type anymore then it has given up ownership of
+ all associated block devices (the bd_holder pointer is NULL).
+
+As this is a VFS level change it has no practical consequences for filesystems
+other than that all of them must use one of the provided kill_litter_super(),
+kill_anon_super(), or kill_block_super() helpers.
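+
+As a rough sketch only (a hypothetical "foofs", not taken from any in-tree
+filesystem), a block device based filesystem keeps working unchanged as long
+as it routes teardown through kill_block_super(), which shuts the superblock
+down and releases holdership of the backing block device::
+
+	#include <linux/fs.h>
+	#include <linux/module.h>
+
+	static int foofs_fill_super(struct super_block *sb, void *data, int silent)
+	{
+		/* placeholder: a real fill_super would build the root dentry */
+		return -ENODEV;
+	}
+
+	static struct dentry *foofs_mount(struct file_system_type *fs_type,
+					  int flags, const char *dev_name, void *data)
+	{
+		return mount_bdev(fs_type, flags, dev_name, data, foofs_fill_super);
+	}
+
+	static void foofs_kill_sb(struct super_block *sb)
+	{
+		/* tears down the superblock and closes the backing bdev */
+		kill_block_super(sb);
+	}
+
+	static struct file_system_type foofs_fs_type = {
+		.owner		= THIS_MODULE,
+		.name		= "foofs",
+		.mount		= foofs_mount,
+		.kill_sb	= foofs_kill_sb,
+		.fs_flags	= FS_REQUIRES_DEV,
+	};
+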
bool "Support for foo hardware"
depends on ARCH_FOO_VENDOR || COMPILE_TEST
+Optional dependencies
+~~~~~~~~~~~~~~~~~~~~~
+
+Some drivers are able to optionally use a feature from another module
+or build cleanly with that module disabled, but cause a link failure
+when a built-in driver tries to use symbols from that loadable module.
+
+The most common way to express this optional dependency in Kconfig logic
+uses the slightly counterintuitive::
+
+ config FOO
+ tristate "Support for foo hardware"
+ depends on BAR || !BAR
+
+This means that there is either a dependency on BAR that disallows
+the combination of FOO=y with BAR=m, or BAR is completely disabled.
+If multiple drivers have the same dependency, a more formalized approach
+is to use a helper symbol, like::
+
+ config FOO
+ tristate "Support for foo hardware"
+ depends on BAR_OPTIONAL
+
+ config BAR_OPTIONAL
+ def_tristate BAR || !BAR
+
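+On the C side such a driver usually guards the optional call with
+IS_ENABLED(). A rough sketch only (BAR, bar_do_something() and its header are
+made up for illustration; the header is assumed to declare the function even
+when BAR is disabled)::
+
+	#include <linux/kconfig.h>
+	#include <linux/bar.h>	/* hypothetical header declaring bar_do_something() */
+
+	static int foo_use_bar_feature(void)
+	{
+		/*
+		 * "depends on BAR || !BAR" rules out FOO=y with BAR=m, and the
+		 * IS_ENABLED() check lets the compiler drop the call entirely
+		 * when BAR is disabled, so no unresolved symbol can remain.
+		 */
+		if (IS_ENABLED(CONFIG_BAR))
+			return bar_do_something();
+
+		return 0;	/* feature absent, continue without it */
+	}
+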
Kconfig recursive dependency limitations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To use the amateur radio protocols within Linux you will need to get a
suitable copy of the AX.25 Utilities. More detailed information about
AX.25, NET/ROM and ROSE, associated programs and utilities can be
-found on http://www.linux-ax25.org.
+found on https://linux-ax25.in-berlin.de.
-There is an active mailing list for discussing Linux amateur radio matters
+There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
of the message; the subject field is ignored. You don't need to be
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
+ RISC-V Palmer Dabbelt <palmer@dabbelt.com>
Samsung Javier González <javier.gonz@samsung.com>
Microsoft James Morris <jamorris@linux.microsoft.com>
doesn't respond to the new UMP inquiries, the driver falls back and
builds the topology based on Group Terminal Block (GTB) information
from the USB descriptor. Some device might be screwed up by the
-unexpected UMP command; in such a case, pass `midi2_probe=0` option to
-snd-usb-audio driver for skipping the UMP v1.1 inquiries.
+unexpected UMP command; in such a case, pass the `midi2_ump_probe=0`
+option to the snd-usb-audio driver to skip the UMP v1.1 inquiries.
When the MIDI 2.0 device is probed, the kernel creates a rawmidi
device for each UMP Endpoint of the device. Its device name is
Documentation of LoongArch ELF psABI:
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (in Chinese)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (in Chinese)
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (in English)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (in English)
Linux kernel repository of Loongson and LoongArch:
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: drivers/perf/
-F: include/linux/perf/arm_pmu.h
+F: include/linux/perf/arm_pmu*.h
ARM PORT
M: Russell King <linux@armlinux.org.uk>
F: arch/arm/boot/dts/amlogic/
F: arch/arm/mach-meson/
F: arch/arm64/boot/dts/amlogic/
-F: drivers/genpd/amlogic/
+F: drivers/pmdomain/amlogic/
F: drivers/mmc/host/meson*
F: drivers/phy/amlogic/
F: drivers/pinctrl/meson/
F: drivers/clk/clk-apple-nco.c
F: drivers/cpufreq/apple-soc-cpufreq.c
F: drivers/dma/apple-admac.c
-F: drivers/genpd/apple/
+F: drivers/pmdomain/apple/
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
-R: Andrew Jeffery <andrew@aj.id.au>
+R: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
Q: https://patchwork.ozlabs.org/project/linux-aspeed/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/aspeed.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/bmc.git
F: Documentation/devicetree/bindings/arm/aspeed/
F: arch/arm/boot/dts/aspeed/
F: arch/arm/mach-aspeed/
F: drivers/clk/clk-nomadik.c
F: drivers/clocksource/clksrc-dbx500-prcmu.c
F: drivers/dma/ste_dma40*
-F: drivers/genpd/st/ste-ux500-pm-domain.c
+F: drivers/pmdomain/st/ste-ux500-pm-domain.c
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
F: drivers/iio/adc/ab8500-gpadc.c
F: arch/arm/mach-shmobile/
F: arch/arm64/boot/dts/renesas/
F: arch/riscv/boot/dts/renesas/
-F: drivers/genpd/renesas/
+F: drivers/pmdomain/renesas/
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
K: \brenesas,
F: drivers/peci/controller/peci-aspeed.c
ASPEED PINCTRL DRIVERS
-M: Andrew Jeffery <andrew@aj.id.au>
+M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
L: linux-gpio@vger.kernel.org
F: include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
ASPEED SD/MMC DRIVER
-M: Andrew Jeffery <andrew@aj.id.au>
+M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
L: linux-mmc@vger.kernel.org
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
S: Maintained
-W: http://www.linux-ax25.org/
+W: https://linux-ax25.in-berlin.de
F: include/net/ax25.h
F: include/uapi/linux/ax25.h
F: net/ax25/
F: drivers/irqchip/irq-bcm63*
F: drivers/irqchip/irq-bcm7*
F: drivers/irqchip/irq-brcmstb*
-F: drivers/genpd/bcm/bcm63xx-power.c
+F: drivers/pmdomain/bcm/bcm63xx-power.c
F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
BROADCOM BRCMSTB GPIO DRIVER
M: Doug Berger <opendmb@gmail.com>
-M: Florian Fainelli <florian.fainelli@broadcom>
+M: Florian Fainelli <florian.fainelli@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
L: linux-pm@vger.kernel.org
S: Maintained
T: git https://github.com/broadcom/stblinux.git
-F: drivers/genpd/bcm/bcm-pmb.c
+F: drivers/pmdomain/bcm/bcm-pmb.c
F: include/dt-bindings/soc/bcm-pmb.h
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
L: linux-btrfs@vger.kernel.org
S: Maintained
W: https://btrfs.readthedocs.io
-W: https://btrfs.wiki.kernel.org/
Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
-M: Ben Skeggs <bskeggs@redhat.com>
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
+M: Danilo Krummrich <dakr@redhat.com>
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
S: Supported
L: linux-pm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
-F: drivers/genpd/
+F: drivers/pmdomain/
GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
F: tools/testing/selftests/gpio/
GPIO REGMAP
-R: Michael Walle <michael@walle.cc>
+M: Michael Walle <michael@walle.cc>
S: Maintained
F: drivers/gpio/gpio-regmap.c
F: include/linux/gpio/regmap.h
MELLANOX HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
S: Maintained
-W: http://www.linux-ax25.org/
+W: https://linux-ax25.in-berlin.de
F: include/net/netrom.h
F: include/uapi/linux/netrom.h
F: net/netrom/
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
-F: drivers/genpd/qcom/cpr.c
+F: drivers/pmdomain/qcom/cpr.c
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@kernel.org>
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
S: Maintained
-W: http://www.linux-ax25.org/
+W: https://linux-ax25.in-berlin.de
F: include/net/rose.h
F: include/uapi/linux/rose.h
F: net/rose/
M: Walker Chen <walker.chen@starfivetech.com>
S: Supported
F: Documentation/devicetree/bindings/power/starfive*
-F: drivers/genpd/starfive/jh71xx-pmu.c
+F: drivers/pmdomain/starfive/jh71xx-pmu.c
F: include/dt-bindings/power/starfive,jh7110-pmu.h
STARFIVE SOC DRIVERS
F: drivers/irqchip/irq-ti-sci-intr.c
F: drivers/reset/reset-ti-sci.c
F: drivers/soc/ti/ti_sci_inta_msi.c
-F: drivers/genpd/ti/ti_sci_pm_domains.c
+F: drivers/pmdomain/ti/ti_sci_pm_domains.c
F: include/dt-bindings/soc/ti,sci_pm_domain.h
F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
-F: drivers/genpd/ti/omap_prm.c
+F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*
TI LM49xxx FAMILY ASoC CODEC DRIVERS
X86 PLATFORM DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
+Q: https://patchwork.kernel.org/project/platform-driver-x86/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
F: drivers/platform/olpc/
F: drivers/platform/x86/
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
/* Configure pwm clock source for timers 8 & 9 */
&timer8 {
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
&timer9 {
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
/*
&uart3 {
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core 0x17c>;
+ overrun-throttle-ms = <500>;
};
&uart4 {
polling-delay = <1000>; /* milliseconds */
coefficients = <0 20000>;
- /* sensor ID */
- thermal-sensors = <&bandgap 0>;
+ thermal-sensors = <&bandgap>;
cpu_trips: trips {
cpu_alert0: cpu_alert {
polling-delay-passive = <250>; /* milliseconds */
polling-delay = <1000>; /* milliseconds */
- /* sensor ID */
+ /*
+ * See 44xx files for single sensor addressing, omap5 and dra7 also
+ * need the sensor ID for addressing.
+ */
thermal-sensors = <&bandgap 0>;
cpu_trips: trips {
};
&cpu_thermal {
+ thermal-sensors = <&bandgap>;
coefficients = <0 20000>;
};
};
&cpu_thermal {
+ thermal-sensors = <&bandgap>;
coefficients = <348 (-9301)>;
};
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
-void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+extern void locomolcd_power(int on);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
* possible causes.
* http://www.spinics.net/lists/arm-kernel/msg218641.html
*/
- pr_warn("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
+ pr_debug("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
} else {
pr_info("Successfully put all powerdomains to target state\n");
}
* http://www.spinics.net/lists/arm-kernel/msg218641.html
*/
if (cpu_is_omap44xx())
- pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
+ pr_debug("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
ret = pwrdm_for_each(pwrdms_setup, NULL);
if (ret) {
#include "hardware.h" /* Gives GPIO_MAX */
-extern void locomolcd_power(int on);
-
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
#define COLLIE_GPIO_CHARGE_ON (COLLIE_SCOOP_GPIO_BASE + 0)
#define COLLIE_SCP_DIAG_BOOT1 SCOOP_GPCR_PA12
((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/**
- * uniphier_cache_data - UniPhier outer cache specific data
+ * struct uniphier_cache_data - UniPhier outer cache specific data
*
* @ctrl_base: virtual base address of control registers
* @rev_base: virtual base address of revision registers
* @op_base: virtual base address of operation registers
+ * @way_ctrl_base: virtual address of the way control registers for this
+ * SoC revision
* @way_mask: each bit specifies if the way is present
* @nsets: number of associativity sets
* @line_size: line size in bytes
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
- xen_hvm_evtchn_do_upcall();
+ xen_evtchn_do_upcall();
return IRQ_HANDLED;
}
dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phg.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-rdk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-prt8mm.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-tqma8mqml-mba8mx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
port {
hdmi_connector_in: endpoint {
- remote-endpoint = <&adv7533_out>;
+ remote-endpoint = <&adv7535_out>;
};
};
};
enable-active-high;
};
+ reg_vddext_3v3: regulator-vddext-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDEXT_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm1 0 5000000 0>;
hdmi@3d {
compatible = "adi,adv7535";
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
-
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
+ avdd-supply = <&buck5_reg>;
+ dvdd-supply = <&buck5_reg>;
+ pvdd-supply = <&buck5_reg>;
+ a2vdd-supply = <&buck5_reg>;
+ v3p3-supply = <®_vddext_3v3>;
+ v1p2-supply = <&buck5_reg>;
ports {
#address-cells = <1>;
port@0 {
reg = <0>;
- adv7533_in: endpoint {
+ adv7535_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
port@1 {
reg = <1>;
- adv7533_out: endpoint {
+ adv7535_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
reg = <1>;
dsi_out: endpoint {
- remote-endpoint = <&adv7533_in>;
+ remote-endpoint = <&adv7535_in>;
data-lanes = <1 2 3 4>;
};
};
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
- assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
+ assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
+ <&clk IMX8MP_AUDIO_PLL2> ;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <12288000>;
+ assigned-clock-rates = <12288000>, <361267200>;
fsl,sai-mclk-direction-output;
status = "okay";
};
reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>;
clocks = <&clk IMX8MP_CLK_AUDIO_ROOT>,
<&clk IMX8MP_CLK_AUDIO_AXI>;
+ assigned-clocks = <&clk IMX8MP_CLK_AUDIO_AHB>,
+ <&clk IMX8MP_CLK_AUDIO_AXI_SRC>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <400000000>,
+ <600000000>;
};
pgc_gpu2d: power-domain@6 {
&gpio1 {
pmic-irq-hog {
gpio-hog;
- gpios = <2 GPIO_ACTIVE_LOW>;
+ gpios = <3 GPIO_ACTIVE_LOW>;
input;
line-name = "PMIC_IRQ#";
};
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xc000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC>;
};
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xd000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC>;
};
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xe000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC>;
};
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xf000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC>;
};
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c11XXXX 0x0000 0x1000>;
- mediatek,merge-fifo-en = <1>;
+ mediatek,merge-fifo-en;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC>;
};
CONFIG_POWER_RESET_QCOM_PON=m
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_SYSCON_REBOOT_MODE=y
CONFIG_NVMEM_REBOOT_MODE=m
CONFIG_BATTERY_SBS=m
CONFIG_COMMON_CLK_PWM=y
CONFIG_COMMON_CLK_RS9_PCIE=y
CONFIG_COMMON_CLK_VC5=y
-CONFIG_COMMON_CLK_NPCM8XX=y
CONFIG_COMMON_CLK_BD718XX=m
CONFIG_CLK_RASPBERRYPI=m
CONFIG_CLK_IMX8MM=y
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
return cpuid_feature_extract_unsigned_field(isar2,
- ID_AA64ISAR2_EL1_BC_SHIFT);
+ ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
+ pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
u64 __guest_enter(struct kvm_vcpu *vcpu);
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
#define FFA_MAX_FUNC_NUM 0x7F
int hyp_ffa_init(void *pages);
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
#endif /* __KVM_HYP_FFA_H */
return true;
}
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
struct arm_smccc_res res;
/*
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
+ bic x0, x0, #ARM_SMCCC_CALL_HINTS
mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
cmp x0, x3
b.eq 1f
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+ id &= ~ARM_SMCCC_CALL_HINTS;
id -= KVM_HOST_SMCCC_ID(0);
if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
bool handled;
- handled = kvm_host_psci_handler(host_ctxt);
+ func_id &= ~ARM_SMCCC_CALL_HINTS;
+
+ handled = kvm_host_psci_handler(host_ctxt, func_id);
if (!handled)
- handled = kvm_host_ffa_handler(host_ctxt);
+ handled = kvm_host_ffa_handler(host_ctxt, func_id);
if (!handled)
default_host_smc_handler(host_ctxt);
}
}
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
switch (kvm_host_psci_config.version) {
mutex_unlock(&kvm_hyp_pgd_mutex);
+ if (!ret)
+ *haddr = base;
+
return ret;
}
flush_tlb_range(&vma, saddr, addr);
}
-static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
-{
- VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
-
- return page_folio(pfn_to_page(swp_offset_pfn(entry)));
-}
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
size_t pgsize;
int i;
unsigned long pfn, dpfn;
pgprot_t hugeprot;
- if (!pte_present(pte)) {
- struct folio *folio;
-
- folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
- ncontig = num_contig_ptes(folio_size(folio), &pgsize);
+ ncontig = num_contig_ptes(sz, &pgsize);
- for (i = 0; i < ncontig; i++, ptep++)
+ if (!pte_present(pte)) {
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
return;
}
return;
}
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
dpfn = pgsize >> PAGE_SHIFT;
hugeprot = pte_pgprot(pte);
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
0b0000 NI
0b0001 IMP
EndEnum
-Res0 47:28
+Res0 47:32
+UnsignedEnum 31:28 CLRBHB
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 27:24 PAC_frac
0b0000 NI
0b0001 IMP
* TBD when IA64 starts to support suspend...
*/
int acpi_suspend_lowlevel(void) { return 0; }
+
+void acpi_proc_quirk_mwait_check(void)
+{
+}
*/
#ifndef __ASSEMBLY__
#ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
+#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
* Memory above this physical address will be considered highmem.
*/
#ifndef HIGHMEM_START
-#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#define HIGHMEM_START (_UL(1) << _UL(DMW_PABITS))
#endif
#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
-#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
+#endif
+
#ifdef CONFIG_64BIT
-#define _CONST64_(x) x ## UL
+#define _CONST64_(x) _UL(x)
#else
-#define _CONST64_(x) x ## ULL
-#endif
+#define _CONST64_(x) _ULL(x)
#endif
/*
#define R_LARCH_TLS_GD_HI20 98
#define R_LARCH_32_PCREL 99
#define R_LARCH_RELAX 100
+#define R_LARCH_DELETE 101
+#define R_LARCH_ALIGN 102
+#define R_LARCH_PCREL20_S2 103
+#define R_LARCH_CFA 104
+#define R_LARCH_ADD6 105
+#define R_LARCH_SUB6 106
+#define R_LARCH_ADD_ULEB128 107
+#define R_LARCH_SUB_ULEB128 108
+#define R_LARCH_64_PCREL 109
#ifndef ELF_ARCH
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXCEPTION_H
+#define __ASM_EXCEPTION_H
+
+#include <asm/ptrace.h>
+#include <linux/kprobes.h>
+
+void show_registers(struct pt_regs *regs);
+
+asmlinkage void cache_parity_error(void);
+asmlinkage void noinstr do_ade(struct pt_regs *regs);
+asmlinkage void noinstr do_ale(struct pt_regs *regs);
+asmlinkage void noinstr do_bce(struct pt_regs *regs);
+asmlinkage void noinstr do_bp(struct pt_regs *regs);
+asmlinkage void noinstr do_ri(struct pt_regs *regs);
+asmlinkage void noinstr do_fpu(struct pt_regs *regs);
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr);
+asmlinkage void noinstr do_lsx(struct pt_regs *regs);
+asmlinkage void noinstr do_lasx(struct pt_regs *regs);
+asmlinkage void noinstr do_lbt(struct pt_regs *regs);
+asmlinkage void noinstr do_watch(struct pt_regs *regs);
+asmlinkage void noinstr do_syscall(struct pt_regs *regs);
+asmlinkage void noinstr do_reserved(struct pt_regs *regs);
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp);
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address);
+
+asmlinkage void handle_ade(void);
+asmlinkage void handle_ale(void);
+asmlinkage void handle_bce(void);
+asmlinkage void handle_sys(void);
+asmlinkage void handle_bp(void);
+asmlinkage void handle_ri(void);
+asmlinkage void handle_fpu(void);
+asmlinkage void handle_fpe(void);
+asmlinkage void handle_lsx(void);
+asmlinkage void handle_lasx(void);
+asmlinkage void handle_lbt(void);
+asmlinkage void handle_watch(void);
+asmlinkage void handle_reserved(void);
+asmlinkage void handle_vint(void);
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs);
+
+#endif /* __ASM_EXCEPTION_H */
#include <asm/io.h>
#include <asm/pgtable.h>
-#define __HAVE_ARCH_SHADOW_MAP
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+#define kasan_mem_to_shadow kasan_mem_to_shadow
+void *kasan_mem_to_shadow(const void *addr);
+
+#define kasan_shadow_to_mem kasan_shadow_to_mem
+const void *kasan_shadow_to_mem(const void *shadow_addr);
+
#define kasan_arch_is_ready kasan_arch_is_ready
static __always_inline bool kasan_arch_is_ready(void)
{
return !kasan_early_stage;
}
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
- if (!kasan_arch_is_ready()) {
- return (void *)(kasan_early_shadow_page);
- } else {
- unsigned long maddr = (unsigned long)addr;
- unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
- unsigned long offset = 0;
-
- maddr &= XRANGE_SHADOW_MASK;
- switch (xrange) {
- case XKPRANGE_CC_SEG:
- offset = XKPRANGE_CC_SHADOW_OFFSET;
- break;
- case XKPRANGE_UC_SEG:
- offset = XKPRANGE_UC_SHADOW_OFFSET;
- break;
- case XKVRANGE_VC_SEG:
- offset = XKVRANGE_VC_SHADOW_OFFSET;
- break;
- default:
- WARN_ON(1);
- return NULL;
- }
-
- return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
- }
-}
-
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+#define addr_has_metadata addr_has_metadata
+static __always_inline bool addr_has_metadata(const void *addr)
{
- unsigned long addr = (unsigned long)shadow_addr;
-
- if (unlikely(addr > KASAN_SHADOW_END) ||
- unlikely(addr < KASAN_SHADOW_START)) {
- WARN_ON(1);
- return NULL;
- }
-
- if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
- return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
- else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
- else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
- else {
- WARN_ON(1);
- return NULL;
- }
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
}
void kasan_init(void);
extern struct secondary_data cpuboot_data;
extern asmlinkage void smpboot_entry(void);
+extern asmlinkage void start_secondary(void);
extern void calculate_cpu_foreign_map(void);
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
+CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
obj-y += mcount.o ftrace.o
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
-void __init acpi_numa_arch_fixup(void) {}
#endif
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
}
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
- memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
/* Reserve the first 2MB */
memblock_reserve(PHYS_OFFSET, 0x200000);
/* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
+
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
}
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleloader.h>
#include <linux/ftrace.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
}
+static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u32 *)location = offset;
+ return 0;
+}
+
+static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u64 *)location = offset;
+ return 0;
+}
+
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
- [R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error,
+ [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+ [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
+ [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
void __init mem_init(void)
{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
}
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
+#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
- b process_entry
done:
ibar 0
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
return new_sp;
}
-void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
- struct extctx_layout *extctx)
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ struct extctx_layout *extctx)
{
unsigned long sp;
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
int sig;
sigset_t set;
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
smp_call_function(stop_this_cpu, NULL, 0);
}
+#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+#endif
static void flush_tlb_all_ipi(void *info)
{
#include <linux/unistd.h>
#include <asm/asm.h>
+#include <asm/exception.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
{
}
-irqreturn_t constant_timer_interrupt(int irq, void *data)
+static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
int cpu = smp_processor_id();
struct clock_event_device *cd;
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <asm/bootinfo.h>
+#include <acpi/processor.h>
+
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
+#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include "access-helper.h"
-extern asmlinkage void handle_ade(void);
-extern asmlinkage void handle_ale(void);
-extern asmlinkage void handle_bce(void);
-extern asmlinkage void handle_sys(void);
-extern asmlinkage void handle_bp(void);
-extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_fpu(void);
-extern asmlinkage void handle_fpe(void);
-extern asmlinkage void handle_lbt(void);
-extern asmlinkage void handle_lsx(void);
-extern asmlinkage void handle_lasx(void);
-extern asmlinkage void handle_reserved(void);
-extern asmlinkage void handle_watch(void);
-extern asmlinkage void handle_vint(void);
-
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
-void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
- struct task_struct *tsk)
+static void force_fcsr_sig(unsigned long fcsr,
+ void __user *fault_addr, struct task_struct *tsk)
{
int si_code = FPE_FLTUNK;
force_sig_fault(SIGFPE, si_code, fault_addr);
}
-int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
int si_code;
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
int status = SIGILL;
- unsigned int opcode = 0;
+ unsigned int __maybe_unused opcode;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
. = ALIGN(PECOFF_SEGMENT_ALIGN);
_etext = .;
- /*
- * struct alt_inst entries. From the header (alternative.h):
- * "Alternative instructions for different CPU types or capabilities"
- * Think locking instructions on spinlocks.
- */
- . = ALIGN(4);
- .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
-
-#ifdef CONFIG_RELOCATABLE
- . = ALIGN(8);
- .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
- __la_abs_begin = .;
- *(.la_abs)
- __la_abs_end = .;
- }
-#endif
-
- .got : ALIGN(16) { *(.got) }
- .plt : ALIGN(16) { *(.plt) }
- .got.plt : ALIGN(16) { *(.got.plt) }
-
- .data.rel : { *(.data.rel*) }
-
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
__initdata_begin = .;
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ */
+ . = ALIGN(4);
+ .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
INIT_DATA_SECTION(16)
.exit.data : {
EXIT_DATA
_sdata = .;
RO_DATA(4096)
+
+ .got : ALIGN(16) { *(.got) }
+ .plt : ALIGN(16) { *(.plt) }
+ .got.plt : ALIGN(16) { *(.got.plt) }
+
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
.rela.dyn : ALIGN(8) {
__rela_dyn_end = .;
}
+ .data.rel : { *(.data.rel*) }
+
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(8);
+ .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+ __la_abs_begin = .;
+ *(.la_abs)
+ __la_abs_end = .;
+ }
+#endif
+
.sdata : {
*(.sdata)
}
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <asm/branch.h>
+#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
return (pte_t *) pmd;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
*/
#include <asm/io.h>
+#include <asm-generic/early_ioremap.h>
void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{
bool kasan_early_stage = true;
+void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
/*
* Alloc memory for shadow memory page table.
*/
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
-void setup_tlb_handler(int cpu)
+static void setup_tlb_handler(int cpu)
{
setup_ptwalker();
local_flush_tlb_all();
/******************************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
+#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
+#endif
};
int __init db1000_dev_setup(void)
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
+#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
};
static struct platform_device *pb1200_devs[] __initdata = {
+#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
+#endif
};
/* Some peripheral base addresses differ on the PB1200 */
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
+#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
+extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);
#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
+ pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_PARISC_MCKINLEY_H
-#define ASM_PARISC_MCKINLEY_H
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
-#endif /*ASM_PARISC_MCKINLEY_H*/
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
-#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot);
+int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
-#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
struct seq_file;
extern void early_trap_init(void);
extern void collect_boot_cpu_data(void);
+extern void btlb_init_per_cpu(void);
extern int show_cpuinfo (struct seq_file *m, void *v);
/* driver code in driver/parisc */
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
struct ioc ioc[MAX_IOC];
};
+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
-#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
+/*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches
+ * which have strict requirements when two pages mapping the same physical
+ * address are accessed through different mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#elif !defined(CONFIG_64BIT)
+ DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info __ro_after_init;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
-#ifndef CONFIG_PA20
- if (pdc_btlb_info(&btlb_info) < 0) {
- memset(&btlb_info, 0, sizeof btlb_info);
- }
-#endif
-
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
+ #define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
- #define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
return retval;
}
-#ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
- int retval;
+ int retval;
unsigned long flags;
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
- memcpy(btlb, pdc_result, sizeof(*btlb));
- spin_unlock_irqrestore(&pdc_lock, flags);
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
- if(retval < 0) {
- btlb->max_size = 0;
- }
- return retval;
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+}
+
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+ (unsigned long) vpage, physpage, len, entry_info, slot);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+}
+
+int pdc_btlb_purge_all(void)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
}
/**
int retval;
unsigned long flags;
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
return retval;
}
-#endif /* !CONFIG_PA20 */
/**
* pdc_lan_station_id - Get the LAN address.
std %dp,0x18(%r10)
#endif
-#ifdef CONFIG_64BIT
- /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+#ifdef CONFIG_64BIT
+ /* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
-
+
+#ifndef CONFIG_64BIT
+ /* clear all BTLBs */
+ ldi PDC_BLOCK_TLB,%arg0
+ load32 PA(stext_pdc_btlb_ret), %rp
+ ldw MEM_PDC_LO(%r0),%r3
+ bv (%r3)
+ ldi PDC_BTLB_PURGE_ALL,%arg1
+stext_pdc_btlb_ret:
+#endif
+
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
volatile unsigned int lock[1];
};
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
+ btlb_init_per_cpu();
+
return ret;
}
}
/* End of data section */
+ . = ALIGN(PAGE_SIZE);
_edata = .;
/* BSS */
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry, unsigned long sz)
{
__set_huge_pte_at(mm, addr, ptep, entry);
}
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
parisc_bootmem_free();
}
+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+{
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+ size /= 2;
+ continue;
+ }
+}
+
+void btlb_init_per_cpu(void)
+{
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+ else if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+
+	/* insert BTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
+}
+
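The size-selection loop in alloc_btlb() above is a greedy walk over the [start, end) range: the block size is doubled while the current address is still aligned to the doubled size and the doubled size still fits in the remainder, halved when the alignment or the remainder forbids the current size, and a block is emitted whenever both conditions hold. A minimal user-space sketch of the same walk (hypothetical example range and slot limit; 4 KiB pages and a 4 MiB starting block are assumed, and the min_size filter of the real code is left out):

#include <stdio.h>

#define PAGE_SIZE     (4UL * 1024)
#define HUGEPAGE_SIZE (4UL * 1024 * 1024)

/* Same greedy alignment walk as alloc_btlb(), printing the chosen blocks. */
static void pick_blocks(unsigned long start, unsigned long end, int slot_max)
{
	unsigned long size = HUGEPAGE_SIZE;
	int slot = 0;

	while (start < end && slot < slot_max && size >= PAGE_SIZE) {
		/* grow while start is aligned to 2*size and 2*size still fits */
		if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size) {
			size <<= 1;
			continue;
		}
		/* shrink until start is aligned to size */
		if (start & (size - 1)) {
			size >>= 1;
			continue;
		}
		if (end - start >= size) {
			printf("slot %d: 0x%08lx + 0x%08lx\n", slot, start, size);
			slot++;
			start += size;
			continue;
		}
		size >>= 1;
	}
}

int main(void)
{
	/* a 6 MiB range that starts 1 MiB into a 4 MiB-aligned region */
	pick_blocks(0x00100000, 0x00700000, 8);
	return 0;
}

For that example range the walk emits 1 MiB, 2 MiB, 2 MiB and 1 MiB blocks, i.e. the largest naturally aligned pieces that tile the region, which is the property each BTLB entry needs since it maps a single power-of-two, size-aligned block.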
#ifdef CONFIG_PA20
/*
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
- select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100))
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
}
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
struct arch_hw_breakpoint *info;
int i;
+ preempt_disable();
+
for (i = 0; i < nr_wp_slots(); i++) {
struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
goto reset;
}
- return;
+ goto out;
reset:
regs_set_return_msr(regs, regs->msr & ~MSR_SE);
__set_breakpoint(i, info);
info->perf_single_step = false;
}
+
+out:
+ preempt_enable();
}
static bool is_larx_stcx_instr(int type)
}
}
+/*
+ * Handle a DABR or DAWR exception.
+ *
+ * Called in atomic context.
+ */
int hw_breakpoint_handler(struct die_args *args)
{
bool err = false;
/*
* Handle single-step exceptions following a DABR hit.
+ *
+ * Called in atomic context.
*/
static int single_step_dabr_instruction(struct die_args *args)
{
/*
* Handle debug exception notifications.
+ *
+ * Called in atomic context.
*/
int hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
int *type, int *size, unsigned long *ea)
{
struct instruction_op op;
+ int err;
- if (__get_user_instr(*instr, (void __user *)regs->nip))
+ pagefault_disable();
+ err = __get_user_instr(*instr, (void __user *)regs->nip);
+ pagefault_enable();
+
+ if (err)
return;
analyse_instr(&op, regs, *instr);
bool firstframe;
stack_end = stack_page + THREAD_SIZE;
- if (!is_idle_task(task)) {
- /*
- * For user tasks, this is the SP value loaded on
- * kernel entry, see "PACAKSAVE(r13)" in _switch() and
- * system_call_common().
- *
- * Likewise for non-swapper kernel threads,
- * this also happens to be the top of the stack
- * as setup by copy_thread().
- *
- * Note that stack backlinks are not properly setup by
- * copy_thread() and thus, a forked task() will have
- * an unreliable stack trace until it's been
- * _switch()'ed to for the first time.
- */
- stack_end -= STACK_USER_INT_FRAME_SIZE;
- } else {
- /*
- * idle tasks have a custom stack layout,
- * c.f. cpu_idle_thread_init().
- */
+
+ // See copy_thread() for details.
+ if (task->flags & PF_KTHREAD)
stack_end -= STACK_FRAME_MIN_SIZE;
- }
+ else
+ stack_end -= STACK_USER_INT_FRAME_SIZE;
if (task == current)
sp = current_stack_frame();
return;
}
- if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
- ppc_inst_t insn;
-
- if (get_user_instr(insn, (void __user *)regs->nip)) {
- _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- return;
- }
-
- if (ppc_inst_primary_opcode(insn) == 31 &&
- get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
- _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
- return;
- }
+ /* User mode considers other cases after enabling IRQs */
+ if (!user_mode(regs)) {
+ _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+ return;
}
-
- _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
- return;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
/*
* If we took the program check in the kernel skip down to sending a
- * SIGILL. The subsequent cases all relate to emulating instructions
- * which we should only do for userspace. We also do not want to enable
- * interrupts for kernel faults because that might lead to further
- * faults, and loose the context of the original exception.
+ * SIGILL. The subsequent cases all relate to user space, such as
+ * emulating instructions which we should only do for user space. We
+ * also do not want to enable interrupts for kernel faults because that
+	 * might lead to further faults, and lose the context of the original
+ * exception.
*/
if (!user_mode(regs))
goto sigill;
interrupt_cond_local_irq_enable(regs);
+ /*
+ * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
+	 * except get_user_instr() can sleep, so we cannot reliably inspect the
+ * current instruction in that context. Now that we know we are
+ * handling a user space trap and can sleep, we can check if the trap
+ * was a hashchk failure.
+ */
+ if (reason & REASON_TRAP) {
+ if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
+ ppc_inst_t insn;
+
+ if (get_user_instr(insn, (void __user *)regs->nip)) {
+ _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+ return;
+ }
+
+ if (ppc_inst_primary_opcode(insn) == 31 &&
+ get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
+ _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+ return;
+ }
+ }
+
+ _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+ return;
+ }
+
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
* that means ESR is sometimes set incorrectly - either to
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
+ unsigned long psize;
if (radix_enabled())
return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
old_pte, pte);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+
+ psize = huge_page_size(hstate_vma(vma));
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
void __init hugetlbpage_init_defaultsize(void)
pte_t old_pte, pte_t pte)
{
struct mm_struct *mm = vma->vm_mm;
+ unsigned long psize = huge_page_size(hstate_vma(vma));
/*
* POWER9 NMMU must flush the TLB after clearing the PTE before
atomic_read(&mm->context.copros) > 0)
radix__flush_hugetlb_page(vma, addr);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
return -EINVAL;
- set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+ set_huge_pte_at(&init_mm, va, ptep,
+ pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);
return 0;
}
}
#if defined(CONFIG_PPC_8xx)
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned long sz)
{
pmd_t *pmd = pmd_off(mm, addr);
pte_basic_t val;
}
domain = event_get_domain(event);
- if (domain >= HV_PERF_DOMAIN_MAX) {
+ if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
pr_devel("invalid domain %d\n", domain);
return -EINVAL;
}
menuconfig PPC_82xx
bool "82xx-based boards (PQ II)"
depends on PPC_BOOK3S_32
+ select FSL_SOC
if PPC_82xx
bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)"
select CPM2
select PPC_INDIRECT_PCI if PCI
- select FSL_SOC
select PHYLIB if NETDEVICES
select MDIO_BITBANG if PHYLIB
help
bool "Keymile MGCOGE"
select CPM2
select PPC_INDIRECT_PCI if PCI
- select FSL_SOC
help
This enables support for the Keymile MGCOGE board.
reg = <0x100000 0x400000>;
};
reserved-data@600000 {
- reg = <0x600000 0x1000000>;
+ reg = <0x600000 0xa00000>;
};
};
};
};
};
- uart0_pins: uart0-0 {
- tx-pins {
- pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- rx-pins {
- pinmux = <GPIOMUX(6, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_UART0_RX)>;
- bias-disable; /* external pull-up */
- drive-strength = <2>;
- input-enable;
- input-schmitt-enable;
- slew-rate = <0>;
- };
- };
-
tdm_pins: tdm-0 {
tx-pins {
pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
input-enable;
};
};
+
+ uart0_pins: uart0-0 {
+ tx-pins {
+ pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ rx-pins {
+ pinmux = <GPIOMUX(6, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_SYS_UART0_RX)>;
+ bias-disable; /* external pull-up */
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-enable;
+ slew-rate = <0>;
+ };
+ };
};
&tdm {
&usb0 {
dr_mode = "peripheral";
+ status = "okay";
};
&U74_1 {
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01001 rs1 000 00000 0001011
* dcache.cva rs1 (clean, virtual address)
- * 0000001 00100 rs1 000 00000 0001011
+ * 0000001 00101 rs1 000 00000 0001011
*
* dcache.cipa rs1 (clean then invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000000 11001 00000 000 00000 0001011
*/
#define THEAD_inval_A0 ".long 0x0265000b"
-#define THEAD_clean_A0 ".long 0x0245000b"
+#define THEAD_clean_A0 ".long 0x0255000b"
#define THEAD_flush_A0 ".long 0x0275000b"
#define THEAD_SYNC_S ".long 0x0190000b"
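The corrected encodings can be cross-checked by assembling the fields from the bit layout in the comment above. A small host-side sketch (illustrative only, not part of the kernel change; the field names are informal, with the rs2 slot acting as a sub-opcode) rebuilds THEAD_clean_A0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | */
	uint32_t funct7 = 0x01;	/* 0000001                            */
	uint32_t sub_op = 0x05;	/* 00101 selects dcache.cva           */
	uint32_t rs1    = 10;	/* a0 carries the address operand     */
	uint32_t funct3 = 0x0;
	uint32_t rd     = 0x0;
	uint32_t opcode = 0x0b;	/* 0001011, the vendor custom opcode  */

	uint32_t insn = (funct7 << 25) | (sub_op << 20) | (rs1 << 15) |
			(funct3 << 12) | (rd << 7) | opcode;

	printf("0x%08x\n", insn);	/* prints 0x0255000b */
	return 0;
}

Swapping the sub-opcode field back to 0b00100 gives 0x0245000b, the value the diff replaces.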
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep, pte_t pte);
+ unsigned long addr, pte_t *ptep, pte_t pte,
+ unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
kbuf.image = image;
kbuf.buf_min = lowest_paddr;
kbuf.buf_max = ULONG_MAX;
- kbuf.buf_align = PAGE_SIZE;
+
+ /*
+	 * The current RISC-V boot protocol requires 2MB alignment for
+	 * RV64 and 4MB alignment for RV32.
+ */
+ kbuf.buf_align = PMD_SIZE;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
kbuf.top_down = false;
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
- *reg_val = 0;
host_isa_ext = kvm_isa_ext_arr[reg_num];
+ if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+ return -ENOENT;
+
+ *reg_val = 0;
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
*reg_val = 1; /* Mark the given extension as available */
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
isa_ext = kvm_isa_ext_arr[i];
- if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+ if (!__riscv_isa_extension_available(NULL, isa_ext))
continue;
if (uindices) {
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
- pte_t pte)
+ pte_t pte,
+ unsigned long sz)
{
int i, pte_num;
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_CRASH_DUMP=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
+CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
CONFIG_HUGETLBFS=y
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_CRASH_DUMP=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
+CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_TEST_LOCKUP=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
-CONFIG_CRASH_DUMP=y
# CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
#define hugepages_supported() (MACHINE_HAS_EDAT1)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
int changed = !pte_same(huge_ptep_get(ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
}
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
char *desc;
cs_token = vcssb->cs_token;
- /* Description string contains "%64s:%04u:%08u\0". */
+ /* Description string contains "%64s:%05u:%010u\0". */
name_len = sizeof(vce->vce_hdr.vc_name);
- len = name_len + 1 + 4 + 1 + 8 + 1;
+ len = name_len + 1 + 5 + 1 + 10 + 1;
desc = kmalloc(len, GFP_KERNEL);
if (!desc)
return NULL;
memcpy(desc, vce->vce_hdr.vc_name, name_len);
- sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
+ snprintf(desc + name_len, len - name_len, ":%05u:%010u",
+ vce->vce_hdr.vc_index, cs_token);
return desc;
}
__storage_key_init_range(paddr, paddr + size - 1);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
unsigned long rste;
set_pte(ptep, __pte(rste));
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, pte);
+}
+
pte_t huge_ptep_get(pte_t *ptep)
{
return __rste_to_pte(pte_val(*ptep));
#define __ioremap_29bit(offset, size, prot) NULL
#endif /* CONFIG_29BIT */
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
{
void __iomem *mapped;
pgprot_t pgprot = __pgprot(prot);
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
unsigned long addr, pte_t *ptep)
{
pte_t old_pte = *ptep;
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
{
int changed = !pte_same(*ptep, pte);
if (changed) {
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
flush_tlb_page(vma, addr);
}
return changed;
return pte_offset_huge(pmd, addr);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned int nptes, orig_shift, shift;
orig_shift);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, entry);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
select ARCH_USE_MEMREMAP_PROT
+ select EFI_RUNTIME_MAP if KEXEC_CORE
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
config EFI_RUNTIME_MAP
bool "Export EFI runtime maps to sysfs" if EXPERT
depends on EFI
- default KEXEC_CORE
help
Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
That memory map is required by the 2nd kernel to set up EFI virtual
return NULL;
}
+ /* Consumed more tables than expected? */
+ if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
+ debug_putstr("pgt_buf running low in " __FILE__ "\n");
+ debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
+ debug_putaddr(pages->pgt_buf_offset);
+ debug_putaddr(pages->pgt_buf_size);
+ }
+
entry = pages->pgt_buf + pages->pgt_buf_offset;
pages->pgt_buf_offset += PAGE_SIZE;
inc_irq_stat(irq_hv_callback_count);
- xen_hvm_evtchn_do_upcall();
+ xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}
/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
- /* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
+ /*
+	 * Clear freeze and overflow bits, i.e. PerfCntrGlobalStatus.LbrFreeze
+	 * and PerfCntrGlobalStatus.PerfCntrOvfl
+ */
+ wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}
static int amd_pmu_cpu_prepare(int cpu)
int i, nb_id;
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+ amd_pmu_cpu_reset(cpu);
if (!x86_pmu.amd_nb_constraints)
return;
cpuc->amd_nb->nb_id = nb_id;
cpuc->amd_nb->refcnt++;
-
- amd_pmu_cpu_reset(cpu);
}
static void amd_pmu_cpu_dead(int cpu)
kfree(cpuhw->lbr_sel);
cpuhw->lbr_sel = NULL;
+ amd_pmu_cpu_reset(cpu);
if (!x86_pmu.amd_nb_constraints)
return;
cpuhw->amd_nb = NULL;
}
-
- amd_pmu_cpu_reset(cpu);
}
static inline void amd_pmu_set_global_ctl(u64 ctl)
struct hw_perf_event *hwc;
struct perf_event *event;
int handled = 0, idx;
- u64 status, mask;
+ u64 reserved, status, mask;
bool pmu_enabled;
/*
status &= ~GLOBAL_STATUS_LBRS_FROZEN;
}
+ reserved = status & ~amd_pmu_global_cntr_mask;
+ if (reserved)
+ pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
+ reserved);
+
+ /* Clear any reserved bits set by buggy microcode */
+ status &= amd_pmu_global_cntr_mask;
+
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
#ifdef CONFIG_X86_64
# define BOOT_STACK_SIZE 0x4000
+/*
+ * Used by the decompressor's startup_32() to allocate page tables for
+ * identity-mapping 4G of RAM in 4-level paging mode:
+ * - 1 level4 table;
+ * - 1 level3 table;
+ * - 4 level2 tables that map everything with 2M pages;
+ *
+ * The additional level5 table needed for 5-level paging is allocated from
+ * trampoline_32bit memory.
+ */
# define BOOT_INIT_PGT_SIZE (6*4096)
-# ifdef CONFIG_RANDOMIZE_BASE
+
/*
- * Assuming all cross the 512GB boundary:
- * 1 page for level4
- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
- * Total is 19 pages.
+ * Total number of page tables kernel_add_identity_map() can allocate,
+ * including page tables consumed by startup_32().
+ *
+ * Worst-case scenario:
+ * - 5-level paging needs 1 level5 table;
+ * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
+ *   assuming all of them cross the 256T boundary:
+ * + 4*2 level4 table;
+ * + 4*2 level3 table;
+ * + 4*2 level2 table;
+ * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
+ * + 1 level4 table;
+ * + 1 level3 table;
+ * + 1 level2 table;
+ * Total: 28 tables
+ *
+ * Add 4 spare tables in case the decompressor touches anything beyond what is
+ * accounted above. Warn if it happens.
*/
-# ifdef CONFIG_X86_VERBOSE_BOOTUP
-# define BOOT_PGT_SIZE (19*4096)
-# else /* !CONFIG_X86_VERBOSE_BOOTUP */
-# define BOOT_PGT_SIZE (17*4096)
-# endif
-# else /* !CONFIG_RANDOMIZE_BASE */
-# define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE
-# endif
+# define BOOT_PGT_SIZE_WARN (28*4096)
+# define BOOT_PGT_SIZE (32*4096)
#else /* !CONFIG_X86_64 */
# define BOOT_STACK_SIZE 0x1000
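As a quick check of the worst case spelled out in the comment above: 1 (level5) + 4*2*3 (level4/level3/level2 tables for the four KASLR-mapped regions) + 3 (verbose-bootup mapping) = 1 + 24 + 3 = 28 tables, which is BOOT_PGT_SIZE_WARN; the 4 spare pages then round the budget up to BOOT_PGT_SIZE = 32 pages, i.e. 128 KiB.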
#ifdef CONFIG_X86_32
#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
-
-#define arch_efi_call_virt_setup() \
-({ \
- efi_fpu_begin(); \
- firmware_restrict_branch_speculation_start(); \
-})
-
-#define arch_efi_call_virt_teardown() \
-({ \
- firmware_restrict_branch_speculation_end(); \
- efi_fpu_end(); \
-})
-
#else /* !CONFIG_X86_32 */
#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
__efi_call(__VA_ARGS__); \
})
-#define arch_efi_call_virt_setup() \
-({ \
- efi_sync_low_kernel_mappings(); \
- efi_fpu_begin(); \
- firmware_restrict_branch_speculation_start(); \
- efi_enter_mm(); \
-})
-
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({ \
u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime); \
ret; \
})
-#define arch_efi_call_virt_teardown() \
-({ \
- efi_leave_mm(); \
- firmware_restrict_branch_speculation_end(); \
- efi_fpu_end(); \
-})
-
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
-void efi_enter_mm(void);
-void efi_leave_mm(void);
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
/* kexec external ABI */
struct efi_setup_data {
* the thread holds the MMU lock in write mode.
*/
spinlock_t tdp_mmu_pages_lock;
- struct workqueue_struct *tdp_mmu_zap_wq;
#endif /* CONFIG_X86_64 */
/*
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
#undef notrace
#define notrace __attribute__((no_instrument_function))
+#ifdef CONFIG_64BIT
+/*
+ * The generic version tends to create spurious ENDBR instructions under
+ * certain conditions.
+ */
+#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
+#endif
+
#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
#endif /* CONFIG_X86_32 */
CFI_POST_PADDING \
SYM_FUNC_END(__cfi_##name)
+/* UML needs to be able to override memcpy() and friends for KASAN. */
+#ifdef CONFIG_UML
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS_WEAK
+#else
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
/* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
#define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \
#else
#define deactivate_mm(tsk, mm) \
do { \
- if (!tsk->vfork_done) \
- shstk_free(tsk); \
+ shstk_free(tsk); \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
u8 type; /* type of this instruction */
u8 len; /* length of original instruction */
};
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
#endif
#ifdef CONFIG_PARAVIRT
__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
-
void _paravirt_nop(void);
void paravirt_BUG(void);
unsigned long paravirt_ret0(void);
return a.pte == b.pte;
}
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ if (__pte_needs_invert(pte_val(pte)))
+ return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT));
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#define pte_next_pfn pte_next_pfn
+
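The subtraction in the inverted case follows from two's-complement arithmetic: when a not-present PTE stores the frame number with its bits flipped, the raw value that encodes pfn + 1 sits exactly one PFN step below the raw value that encodes pfn, because ~(x + 1) == ~x - 1. A toy check of that identity (a simplified stand-alone model, not the kernel's actual PTE layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PFN_PTE_SHIFT	12	/* the PFN field sits above the low flag bits */

/* Toy "inverted" encoding: store the bitwise complement of the PFN field. */
static uint64_t encode_inverted(uint64_t pfn)
{
	return ~(pfn << PFN_PTE_SHIFT);
}

int main(void)
{
	uint64_t pfn = 0x12345;

	/* Advancing the PFN by one moves the raw value down by one PFN step. */
	assert(encode_inverted(pfn + 1) ==
	       encode_inverted(pfn) - (1ULL << PFN_PTE_SHIFT));

	printf("pfn %#llx -> %#llx, pfn+1 -> %#llx\n",
	       (unsigned long long)pfn,
	       (unsigned long long)encode_inverted(pfn),
	       (unsigned long long)encode_inverted(pfn + 1));
	return 0;
}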
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
-extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
-static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
static inline void amd_check_microcode(void) { }
#endif
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
+#include <asm/bug.h>
#include <asm/processor.h>
#define XEN_SIGNATURE "XenVMMXenVMM"
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#endif
+/* Lazy mode for batching updates / context switch */
+enum xen_lazy_mode {
+ XEN_LAZY_NONE,
+ XEN_LAZY_MMU,
+ XEN_LAZY_CPU,
+};
+
+DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
+DECLARE_PER_CPU(unsigned int, xen_lazy_nesting);
+
+static inline void enter_lazy(enum xen_lazy_mode mode)
+{
+ enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode);
+
+ if (mode == old_mode) {
+ this_cpu_inc(xen_lazy_nesting);
+ return;
+ }
+
+ BUG_ON(old_mode != XEN_LAZY_NONE);
+
+ this_cpu_write(xen_lazy_mode, mode);
+}
+
+static inline void leave_lazy(enum xen_lazy_mode mode)
+{
+ BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
+
+ if (this_cpu_read(xen_lazy_nesting) == 0)
+ this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
+ else
+ this_cpu_dec(xen_lazy_nesting);
+}
+
+enum xen_lazy_mode xen_get_lazy_mode(void);
+
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
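The nesting counter added above lets the same lazy mode be entered recursively: re-entering the current mode only bumps xen_lazy_nesting, only the outermost leave_lazy() drops the CPU back to XEN_LAZY_NONE, and entering a different mode while not in XEN_LAZY_NONE is a bug. A minimal user-space model of that protocol (plain globals instead of per-CPU data and assert() instead of BUG_ON(); illustrative only):

#include <assert.h>
#include <stdio.h>

enum xen_lazy_mode { XEN_LAZY_NONE, XEN_LAZY_MMU, XEN_LAZY_CPU };

static enum xen_lazy_mode lazy_mode = XEN_LAZY_NONE;
static unsigned int lazy_nesting;

static void enter_lazy(enum xen_lazy_mode mode)
{
	if (mode == lazy_mode) {		/* re-entry: just count it */
		lazy_nesting++;
		return;
	}
	assert(lazy_mode == XEN_LAZY_NONE);	/* no mixing of lazy modes */
	lazy_mode = mode;
}

static void leave_lazy(enum xen_lazy_mode mode)
{
	assert(lazy_mode == mode);
	if (lazy_nesting == 0)			/* outermost leave */
		lazy_mode = XEN_LAZY_NONE;
	else
		lazy_nesting--;
}

int main(void)
{
	enter_lazy(XEN_LAZY_MMU);	/* outer batch */
	enter_lazy(XEN_LAZY_MMU);	/* nested batch, nesting = 1 */
	leave_lazy(XEN_LAZY_MMU);	/* nesting back to 0, still lazy */
	leave_lazy(XEN_LAZY_MMU);	/* back to XEN_LAZY_NONE */
	printf("mode=%d nesting=%u\n", lazy_mode, lazy_nesting);
	return 0;
}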
{
s32 *s;
- /*
- * Do not patch out the default return thunks if those needed are the
- * ones generated by the compiler.
- */
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
- (x86_return_thunk == __x86_return_thunk))
- return;
+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ static_call_force_reinit();
for (s = start; s < end; s++) {
void *dest = NULL, *addr = (void *)s + *s;
{
struct uv_gam_range_entry *gre = uv_gre_table;
int nums, numn, nump;
- int cpu, i, lnid;
+ int i, lnid, apicid;
int minsock = _min_socket;
int maxsock = _max_socket;
int minpnode = _min_pnode;
/* Set socket -> node values: */
lnid = NUMA_NO_NODE;
- for_each_possible_cpu(cpu) {
- int nid = cpu_to_node(cpu);
- int apicid, sockid;
+ for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
+ int nid = __apicid_to_node[apicid];
+ int sockid;
- if (lnid == nid)
+ if ((nid == NUMA_NO_NODE) || (lnid == nid))
continue;
lnid = nid;
- apicid = per_cpu(x86_cpu_to_apicid, cpu);
sockid = apicid >> uv_cpuid.socketid_shift;
if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)
pr_info("Setting up call depth tracking\n");
mutex_lock(&text_mutex);
callthunks_setup(&cs, &builtin_coretext);
- static_call_force_reinit();
thunks_initialized = true;
mutex_unlock(&text_mutex);
}
if (cpu_has(c, X86_FEATURE_TOPOEXT))
smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
+ if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ setup_force_cpu_cap(X86_FEATURE_SBPB);
+ }
+ }
}
static void init_amd_k8(struct cpuinfo_x86 *c)
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
-bool cpu_has_ibpb_brtype_microcode(void)
-{
- switch (boot_cpu_data.x86) {
- /* Zen1/2 IBPB flushes branch type predictions too. */
- case 0x17:
- return boot_cpu_has(X86_FEATURE_AMD_IBPB);
- case 0x19:
- /* Poke the MSR bit on Zen3/4 to check its presence. */
- if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
- setup_force_cpu_cap(X86_FEATURE_SBPB);
- return true;
- } else {
- return false;
- }
- default:
- return false;
- }
-}
-
/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
static void __init srso_select_mitigation(void)
{
- bool has_microcode;
+ bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
goto pred_cmd;
- /*
- * The first check is for the kernel running as a guest in order
- * for guests to verify whether IBPB is a viable mitigation.
- */
- has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
if (!has_microcode) {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
} else {
/*
- * Enable the synthetic (even if in a real CPUID leaf)
- * flags for guests.
- */
- setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
-
- /*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
*/
switch (srso_cmd) {
case SRSO_CMD_OFF:
- return;
+ goto pred_cmd;
case SRSO_CMD_MICROCODE:
if (has_microcode) {
return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
- (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}
static ssize_t gds_show_state(char *buf)
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
- VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
VULNBL_AMD(0x19, SRSO),
{}
};
return epc_page;
}
+/*
+ * Ensure the SECS page is not swapped out. Must be called with encl->lock
+ * held to protect the enclave state, including the SECS, and to ensure the
+ * SECS page is not swapped out again while it is being used.
+ */
+static struct sgx_epc_page *sgx_encl_load_secs(struct sgx_encl *encl)
+{
+ struct sgx_epc_page *epc_page = encl->secs.epc_page;
+
+ if (!epc_page)
+ epc_page = sgx_encl_eldu(&encl->secs, NULL);
+
+ return epc_page;
+}
+
static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
struct sgx_encl_page *entry)
{
return entry;
}
- if (!(encl->secs.epc_page)) {
- epc_page = sgx_encl_eldu(&encl->secs, NULL);
- if (IS_ERR(epc_page))
- return ERR_CAST(epc_page);
- }
+ epc_page = sgx_encl_load_secs(encl);
+ if (IS_ERR(epc_page))
+ return ERR_CAST(epc_page);
epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
if (IS_ERR(epc_page))
mutex_lock(&encl->lock);
+ epc_page = sgx_encl_load_secs(encl);
+ if (IS_ERR(epc_page)) {
+ if (PTR_ERR(epc_page) == -EBUSY)
+ vmret = VM_FAULT_NOPAGE;
+ goto err_out_unlock;
+ }
+
epc_page = sgx_alloc_epc_page(encl_page, false);
if (IS_ERR(epc_page)) {
if (PTR_ERR(epc_page) == -EBUSY)
}
/**
- *
* kgdb_skipexception - Bail out of KGDB when we've been triggered.
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
return request_resource(&ioport_resource, &reserve_ioports);
}
-static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
-
-static inline void enter_lazy(enum paravirt_lazy_mode mode)
-{
- BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-
- this_cpu_write(paravirt_lazy_mode, mode);
-}
-
-static void leave_lazy(enum paravirt_lazy_mode mode)
-{
- BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
-
- this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-void paravirt_enter_lazy_mmu(void)
-{
- enter_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_leave_lazy_mmu(void)
-{
- leave_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_flush_lazy_mmu(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
#ifdef CONFIG_PARAVIRT_XXL
-void paravirt_start_context_switch(struct task_struct *prev)
-{
- BUG_ON(preemptible());
-
- if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
- }
- enter_lazy(PARAVIRT_LAZY_CPU);
-}
-
-void paravirt_end_context_switch(struct task_struct *next)
-{
- BUG_ON(preemptible());
-
- leave_lazy(PARAVIRT_LAZY_CPU);
-
- if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
- arch_enter_lazy_mmu_mode();
-}
-
static noinstr void pv_native_write_cr2(unsigned long val)
{
native_write_cr2(val);
}
#endif
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- if (in_interrupt())
- return PARAVIRT_LAZY_NONE;
-
- return this_cpu_read(paravirt_lazy_mode);
-}
-
struct pv_info pv_info = {
.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);
- /*
- * If copy_thread() if failing, don't leak the shadow stack possibly
- * allocated in shstk_alloc_thread_stack() above.
- */
- if (ret)
- shstk_free(p);
-
return ret;
}
#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
- int rc;
-
if (!ima_kexec_buffer_size)
return -ENOENT;
- rc = memblock_phys_free(ima_kexec_buffer_phys,
- ima_kexec_buffer_size);
- if (rc)
- return rc;
+ memblock_free_late(ima_kexec_buffer_phys,
+ ima_kexec_buffer_size);
ima_kexec_buffer_phys = 0;
ima_kexec_buffer_size = 0;
return 0;
/*
- * For CLONE_VM, except vfork, the child needs a separate shadow
+	 * For CLONE_VFORK the child will share the parent's shadow stack.
+	 * Make sure to clear the internal tracking of the thread shadow
+	 * stack so the freeing logic run for the child knows to leave it alone.
+ */
+ if (clone_flags & CLONE_VFORK) {
+ shstk->base = 0;
+ shstk->size = 0;
+ return 0;
+ }
+
+ /*
+	 * For !CLONE_VM the child will use a copy of the parent's shadow
* stack.
*/
- if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
+ if (!(clone_flags & CLONE_VM))
return 0;
size = adjust_shstk_size(stack_size);
if (!tsk->mm || tsk->mm != current->mm)
return;
+ /*
+ * If shstk->base is NULL, then this task is not managing its
+ * own shadow stack (CLONE_VFORK). So skip freeing it.
+ */
+ if (!shstk->base)
+ return;
+
+ /*
+	 * shstk->base is NULL for CLONE_VFORK child tasks, and so is
+	 * normal. But size = 0 with a non-NULL shstk->base is not normal and
+	 * indicates an attempt to free the thread shadow stack twice.
+ * Warn about it.
+ */
+ if (WARN_ON(!shstk->size))
+ return;
+
unmap_shadow_stack(shstk->base, shstk->size);
+
+ shstk->size = 0;
}
static int wrss_control(bool enable)
}
-#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
return cpu_cluster_flags() | x86_sched_itmt_flags();
}
#endif
-#endif
+
+static int x86_die_flags(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ return x86_sched_itmt_flags();
+
+ return 0;
+}
/*
* Set if a package/die has multiple NUMA nodes inside.
*/
if (!x86_has_numa_in_package) {
x86_topology[i++] = (struct sched_domain_topology_level){
- cpu_cpu_mask, SD_INIT_NAME(DIE)
+ cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
};
}
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}
-int kvm_mmu_init_vm(struct kvm *kvm)
+void kvm_mmu_init_vm(struct kvm *kvm)
{
- int r;
-
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
- if (tdp_mmu_enabled) {
- r = kvm_mmu_init_tdp_mmu(kvm);
- if (r < 0)
- return r;
- }
+ if (tdp_mmu_enabled)
+ kvm_mmu_init_tdp_mmu(kvm);
kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
-
- return 0;
}
static void mmu_free_vm_memory_caches(struct kvm *kvm)
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
bool flush;
- int i;
if (WARN_ON_ONCE(gfn_end <= gfn_start))
return;
flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
- if (tdp_mmu_enabled) {
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
- gfn_end, true, flush);
- }
+ if (tdp_mmu_enabled)
+ flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
if (flush)
kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
bool tdp_mmu_page;
bool unsync;
- u8 mmu_valid_gen;
+ union {
+ u8 mmu_valid_gen;
+
+ /* Only accessed under slots_lock. */
+ bool tdp_mmu_scheduled_root_to_zap;
+ };
/*
* The shadow page can't be replaced by an equivalent huge page
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
tdp_ptep_t ptep;
};
- union {
- DECLARE_BITMAP(unsync_child_bitmap, 512);
- struct {
- struct work_struct tdp_mmu_async_work;
- void *tdp_mmu_async_data;
- };
- };
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
/*
* Tracks shadow pages that, if zapped, would allow KVM to create an NX
#include <trace/events/kvm.h>
/* Initializes the TDP MMU for the VM, if enabled. */
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
- struct workqueue_struct *wq;
-
- wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
- if (!wq)
- return -ENOMEM;
-
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
- kvm->arch.tdp_mmu_zap_wq = wq;
- return 1;
}
/* Arbitrarily returns true so that this may be used in if statements. */
* ultimately frees all roots.
*/
kvm_tdp_mmu_invalidate_all_roots(kvm);
-
- /*
- * Destroying a workqueue also first flushes the workqueue, i.e. no
- * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
- */
- destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm);
WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
* Ensure that all the outstanding RCU callbacks to free shadow pages
- * can run before the VM is torn down. Work items on tdp_mmu_zap_wq
- * can call kvm_tdp_mmu_put_root and create new callbacks.
+ * can run before the VM is torn down. Putting the last reference to
+ * zapped roots will create new callbacks.
*/
rcu_barrier();
}
tdp_mmu_free_sp(sp);
}
-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared);
-
-static void tdp_mmu_zap_root_work(struct work_struct *work)
-{
- struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
- tdp_mmu_async_work);
- struct kvm *kvm = root->tdp_mmu_async_data;
-
- read_lock(&kvm->mmu_lock);
-
- /*
- * A TLB flush is not necessary as KVM performs a local TLB flush when
- * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
- * to a different pCPU. Note, the local TLB flush on reuse also
- * invalidates any paging-structure-cache entries, i.e. TLB entries for
- * intermediate paging structures, that may be zapped, as such entries
- * are associated with the ASID on both VMX and SVM.
- */
- tdp_mmu_zap_root(kvm, root, true);
-
- /*
- * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
- * avoiding an infinite loop. By design, the root is reachable while
- * it's being asynchronously zapped, thus a different task can put its
- * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
- * asynchronously zapped root is unavoidable.
- */
- kvm_tdp_mmu_put_root(kvm, root, true);
-
- read_unlock(&kvm->mmu_lock);
-}
-
-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
- root->tdp_mmu_async_data = kvm;
- INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
- queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
-}
-
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
- __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
+ if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
+ } else
/*
* Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
* by a memslot update or by the destruction of the VM. Initialize the
* refcount to two; one reference for the vCPU, and one reference for
* the TDP MMU itself, which is held until the root is invalidated and
- * is ultimately put by tdp_mmu_zap_root_work().
+ * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
*/
refcount_set(&root->tdp_mmu_root_count, 2);
* true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
* more SPTEs were zapped since the MMU lock was last acquired.
*/
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
- bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
- flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
return flush;
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *root;
- int i;
/*
* Zap all roots, including invalid roots, as all SPTEs must be dropped
* is being destroyed or the userspace VMM has exited. In both cases,
* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
*/
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- for_each_tdp_mmu_root_yield_safe(kvm, root, i)
- tdp_mmu_zap_root(kvm, root, false);
- }
+ for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ tdp_mmu_zap_root(kvm, root, false);
}
/*
*/
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
- flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ struct kvm_mmu_page *root;
+
+ read_lock(&kvm->mmu_lock);
+
+ for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+ if (!root->tdp_mmu_scheduled_root_to_zap)
+ continue;
+
+ root->tdp_mmu_scheduled_root_to_zap = false;
+ KVM_BUG_ON(!root->role.invalid, kvm);
+
+ /*
+ * A TLB flush is not necessary as KVM performs a local TLB
+ * flush when allocating a new root (see kvm_mmu_load()), and
+ * when migrating a vCPU to a different pCPU. Note, the local
+ * TLB flush on reuse also invalidates paging-structure-cache
+ * entries, i.e. TLB entries for intermediate paging structures,
+ * that may be zapped, as such entries are associated with the
+ * ASID on both VMX and SVM.
+ */
+ tdp_mmu_zap_root(kvm, root, true);
+
+ /*
+		 * The reference needs to be put *after* zapping the root, as
+		 * the root must be reachable by mmu_notifiers while it's being
+		 * zapped.
+ */
+ kvm_tdp_mmu_put_root(kvm, root, true);
+ }
+
+ read_unlock(&kvm->mmu_lock);
}
/*
* Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
* is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is performed asynchronously. Using a separate workqueue makes it
- * easy to ensure that the destruction is performed before the "fast zap"
- * completes, without keeping a separate list of invalidated roots; the list is
- * effectively the list of work items in the workqueue.
+ * zapping is done separately so that it happens with mmu_lock held for read,
+ * whereas invalidating roots must be done with mmu_lock held for write (unless
+ * the VM is being destroyed).
*
- * Note, the asynchronous worker is gifted the TDP MMU's reference.
+ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_get_vcpu_root_hpa().
*/
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
/*
* As above, mmu_lock isn't held when destroying the VM! There can't
* be other references to @kvm, i.e. nothing else can invalidate roots
- * or be consuming roots, but walking the list of roots does need to be
- * guarded against roots being deleted by the asynchronous zap worker.
+ * or get/put references to roots.
*/
- rcu_read_lock();
-
- list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
+ list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+ /*
+ * Note, invalid roots can outlive a memslot update! Invalid
+ * roots must be *zapped* before the memslot update completes,
+ * but a different task can acquire a reference and keep the
+		 * root alive after it's been zapped.
+ */
if (!root->role.invalid) {
+ root->tdp_mmu_scheduled_root_to_zap = true;
root->role.invalid = true;
- tdp_mmu_schedule_zap_root(kvm, root);
}
}
-
- rcu_read_unlock();
}
/*
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool flush)
{
- return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
- range->end, range->may_block, flush);
+ struct kvm_mmu_page *root;
+
+ __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+ flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
+ range->may_block, flush);
+
+ return flush;
}
typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
#include "spte.h"
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared);
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
- gfn_t end, bool can_yield, bool flush);
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
count, in);
}
+static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+ bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+ set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
+ }
+}
+
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_cpuid_entry2 *best;
+
+ /* For sev guests, the memory encryption bit is not reserved in CR3. */
+ best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
+ if (best)
+ vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
+
+ if (sev_es_guest(svm->vcpu.kvm))
+ sev_es_vcpu_after_set_cpuid(svm);
+}
+
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
-
- if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) &&
- (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) {
- set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1);
- if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
- svm_clr_intercept(svm, INTERCEPT_RDTSCP);
- }
}
void sev_init_vmcb(struct vcpu_svm *svm)
amd_pmu_enable_virt();
+ /*
+ * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
+ * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
+ * Since Linux does not change the value of TSC_AUX once set, prime the
+ * TSC_AUX field now to avoid a RDMSR on every vCPU run.
+ */
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+ struct sev_es_save_area *hostsa;
+ u32 msr_hi;
+
+ hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
+
+ rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
+ }
+
return 0;
}
if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
- if (likely(tsc_aux_uret_slot >= 0))
+ /*
+ * TSC_AUX is always virtualized for SEV-ES guests when the feature is
+ * available. The user return MSR support is not required in this case
+ * because TSC_AUX is restored on #VMEXIT from the host save area
+ * (which has been initialized in svm_hardware_enable()).
+ */
+ if (likely(tsc_aux_uret_slot >= 0) &&
+ (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
svm->guest_state_loaded = true;
break;
case MSR_TSC_AUX:
/*
+ * TSC_AUX is always virtualized for SEV-ES guests when the
+ * feature is available. The user return MSR support is not
+ * required in this case because TSC_AUX is restored on #VMEXIT
+ * from the host save area (which has been initialized in
+ * svm_hardware_enable()).
+ */
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
+ break;
+
+ /*
* TSC_AUX is usually changed only during boot and never read
* directly. Intercept TSC_AUX instead of exposing it to the
* guest via direct_access_msrs, and switch it via user return.
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_cpuid_entry2 *best;
/*
* SVM doesn't provide a way to disable just XSAVES in the guest, KVM
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
!!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
- /* For sev guests, the memory encryption bit is not reserved in CR3. */
- if (sev_guest(vcpu->kvm)) {
- best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
- if (best)
- vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
- }
+ if (sev_guest(vcpu->kvm))
+ sev_vcpu_after_set_cpuid(svm);
init_vmcb_after_set_cpuid(vcpu);
}
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
if (ret)
goto out;
- ret = kvm_mmu_init_vm(kvm);
- if (ret)
- goto out_page_track;
+ kvm_mmu_init_vm(kvm);
ret = static_call(kvm_x86_vm_init)(kvm);
if (ret)
out_uninit_mmu:
kvm_mmu_uninit_vm(kvm);
-out_page_track:
kvm_page_track_cleanup(kvm);
out:
return ret;
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)
-SYM_FUNC_ALIAS(memmove, __memmove)
+SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
EXPORT_SYMBOL(memmove)
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
-SYM_FUNC_ALIAS(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
EXPORT_SYMBOL(__put_user_1)
SYM_FUNC_START(__put_user_nocheck_1)
- ENDBR
ASM_STAC
2: movb %al,(%_ASM_CX)
xor %ecx,%ecx
EXPORT_SYMBOL(__put_user_2)
SYM_FUNC_START(__put_user_nocheck_2)
- ENDBR
ASM_STAC
4: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
EXPORT_SYMBOL(__put_user_4)
SYM_FUNC_START(__put_user_nocheck_4)
- ENDBR
ASM_STAC
6: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
EXPORT_SYMBOL(__put_user_8)
SYM_FUNC_START(__put_user_nocheck_8)
- ENDBR
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
}
}
}
+
+void arch_efi_call_virt_setup(void)
+{
+ efi_fpu_begin();
+ firmware_restrict_branch_speculation_start();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+ firmware_restrict_branch_speculation_end();
+ efi_fpu_end();
+}
* can not change under us.
* It should be ensured that there are no concurrent calls to this function.
*/
-void efi_enter_mm(void)
+static void efi_enter_mm(void)
{
efi_prev_mm = current->active_mm;
current->active_mm = &efi_mm;
switch_mm(efi_prev_mm, &efi_mm, NULL);
}
-void efi_leave_mm(void)
+static void efi_leave_mm(void)
{
current->active_mm = efi_prev_mm;
switch_mm(&efi_mm, efi_prev_mm, NULL);
}
+void arch_efi_call_virt_setup(void)
+{
+ efi_sync_low_kernel_mappings();
+ efi_fpu_begin();
+ firmware_restrict_branch_speculation_start();
+ efi_enter_mm();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+ efi_leave_mm();
+ firmware_restrict_branch_speculation_end();
+ efi_fpu_end();
+}
+
static DEFINE_SPINLOCK(efi_runtime_lock);
/*
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+# When LTO is enabled, llvm emits many text sections, which is not supported
+# by kexec. Remove -flto=* flags.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
if (efi_systab_xen == NULL)
return;
- strncpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
+ strscpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
sizeof(boot_params->efi_info.efi_loader_signature));
boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
- * The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
+ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
inc_irq_stat(irq_hv_callback_count);
- xen_hvm_evtchn_do_upcall();
+ xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}
struct desc_struct desc[3];
};
+DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
+DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);
+
+enum xen_lazy_mode xen_get_lazy_mode(void)
+{
+ if (in_interrupt())
+ return XEN_LAZY_NONE;
+
+ return this_cpu_read(xen_lazy_mode);
+}
+
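A plausible sketch of the enter_lazy()/leave_lazy() helpers implied by the per-CPU state above; their exact bodies are not shown in these hunks and the nesting handling here is an assumption:

	static inline void enter_lazy(enum xen_lazy_mode mode)
	{
		if (this_cpu_read(xen_lazy_mode) == mode)
			this_cpu_inc(xen_lazy_nesting);	/* nested entry of the same mode */
		else
			this_cpu_write(xen_lazy_mode, mode);
	}

	static inline void leave_lazy(enum xen_lazy_mode mode)
	{
		if (this_cpu_read(xen_lazy_nesting))
			this_cpu_dec(xen_lazy_nesting);
		else
			this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
	}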
/*
* Updating the 3 TLS descriptors in the GDT on every task switch is
* surprisingly expensive so we avoid updating them if they haven't
return HYPERVISOR_get_debugreg(reg);
}
+static void xen_start_context_switch(struct task_struct *prev)
+{
+ BUG_ON(preemptible());
+
+ if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
+ }
+ enter_lazy(XEN_LAZY_CPU);
+}
+
static void xen_end_context_switch(struct task_struct *next)
{
+ BUG_ON(preemptible());
+
xen_mc_flush();
- paravirt_end_context_switch(next);
+ leave_lazy(XEN_LAZY_CPU);
+ if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
+ arch_enter_lazy_mmu_mode();
}
static unsigned long xen_store_tr(void)
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_load_gdt(const struct desc_ptr *dtr)
* exception between the new %fs descriptor being loaded and
* %fs being effectively cleared at __switch_to().
*/
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+ if (xen_get_lazy_mode() == XEN_LAZY_CPU)
loadsegment(fs, 0);
xen_mc_batch();
load_TLS_descriptor(t, cpu, 1);
load_TLS_descriptor(t, cpu, 2);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_load_gs_index(unsigned int idx)
mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ xen_mc_issue(XEN_LAZY_CPU);
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_write_cr4(unsigned long cr4)
#endif
.io_delay = xen_io_delay,
- .start_context_switch = paravirt_start_context_switch,
+ .start_context_switch = xen_start_context_switch,
.end_context_switch = xen_end_context_switch,
},
};
u.val = pmd_val_ma(val);
xen_extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
{
struct mmu_update u;
- if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+ if (xen_get_lazy_mode() != XEN_LAZY_MMU)
return false;
xen_mc_batch();
u.val = pte_val_ma(pteval);
xen_extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
return true;
}
u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
}
/* Assume pteval_t is equivalent to all the other *val_t types. */
u.val = pud_val_ma(val);
xen_extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
__xen_set_p4d_hyper(ptr, val);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
if (user_ptr)
__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
}
#if CONFIG_PGTABLE_LEVELS >= 5
op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
op->arg1.linear_addr = addr & PAGE_MASK;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
}
static unsigned long xen_read_cr3(void)
else
__xen_write_cr3(false, 0);
- xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
+ xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
/*
__xen_write_cr3(true, cr3);
- xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
+ xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
static int xen_pgd_alloc(struct mm_struct *mm)
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
}
}
__set_pfn_prot(pfn, PAGE_KERNEL);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
ClearPagePinned(page);
}
*/
xen_mc_batch();
__xen_write_cr3(true, __pa(init_top_pgt));
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ xen_mc_issue(XEN_LAZY_CPU);
/* We can't easily rip out L3 and L2, as the Xen pagetables are
* set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
#endif
}
+static void xen_enter_lazy_mmu(void)
+{
+ enter_lazy(XEN_LAZY_MMU);
+}
+
+static void xen_flush_lazy_mmu(void)
+{
+ preempt_disable();
+
+ if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
static void __init xen_post_allocator_init(void)
{
pv_ops.mmu.set_pte = xen_set_pte;
{
preempt_disable();
xen_mc_flush();
- paravirt_leave_lazy_mmu();
+ leave_lazy(XEN_LAZY_MMU);
preempt_enable();
}
.exit_mmap = xen_exit_mmap,
.lazy_mode = {
- .enter = paravirt_enter_lazy_mmu,
+ .enter = xen_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
- .flush = paravirt_flush_lazy_mmu,
+ .flush = xen_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
op->cmd = MMUEXT_TLB_FLUSH_ALL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
+ xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/* need to disable interrupts until this entry is complete */
local_irq_save(flags);
- trace_xen_mc_batch(paravirt_get_lazy_mode());
+ trace_xen_mc_batch(xen_get_lazy_mode());
__this_cpu_write(xen_mc_irq_flags, flags);
}
{
trace_xen_mc_issue(mode);
- if ((paravirt_get_lazy_mode() & mode) == 0)
+ if ((xen_get_lazy_mode() & mode) == 0)
xen_mc_flush();
/* restore flags saved in xen_mc_batch */
# KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
-KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
-HOSTFLAGS += -Iarch/$(ARCH)/boot/include
+KBUILD_CFLAGS += -fno-builtin
subdir-y := lib
targets += vmlinux.bin vmlinux.bin.gz
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
-void exit (void)
+static void exit(void)
{
for (;;);
}
-void *zalloc(unsigned size)
+static void *zalloc(unsigned int size)
{
void *p = avail_ram;
#include <variant/core.h>
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
#ifndef XCHAL_HAVE_EXCLUSIVE
#define XCHAL_HAVE_EXCLUSIVE 0
#endif
void hw_breakpoint_pmu_read(struct perf_event *bp);
int check_hw_breakpoint(struct pt_regs *regs);
void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+void restore_dbreak(void);
#else
#include <linux/compiler.h>
#include <linux/stringify.h>
+
+#include <asm/bootparam.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
extern unsigned long __get_wchan(struct task_struct *p);
+void init_arch(bp_tag_t *bp_start);
+void do_notify_resume(struct pt_regs *regs);
+
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
return regs->areg[2];
}
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
#else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h>
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
void arch_send_call_function_single_ipi(int cpu);
+void secondary_start_kernel(void);
void smp_init_cpus(void);
void secondary_init_irq(void);
void ipi_init(void);
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+void check_tlb_sanity(void);
+
#endif /* _XTENSA_TLB_H */
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <asm/core.h>
+#include <asm/hw_breakpoint.h>
/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
#include <asm/mxregs.h>
#include <linux/uaccess.h>
#include <asm/platform.h>
+#include <asm/traps.h>
DECLARE_PER_CPU(unsigned long, nmi_count);
return ret;
}
-void do_syscall_trace_leave(struct pt_regs *regs);
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (regs->syscall == NO_SYSCALL)
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
+#include <asm/processor.h>
+#include <asm/syscall.h>
#include <asm/unistd.h>
extern struct task_struct *coproc_owners[];
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/profile.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <asm/ftrace.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
* for more details.
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
-#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
+#define XCHAL_NO_MUL 0
+#else
#define XCHAL_NO_MUL 1
#endif
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
+#include <asm/traps.h>
void bad_page_fault(struct pt_regs*, unsigned long, int);
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
-unsigned short tuntap_protocol(struct sk_buff *skb)
+static unsigned short tuntap_protocol(struct sk_buff *skb)
{
return eth_type_trans(skb, skb->dev);
}
return -EINVAL;
}
-void iss_net_user_timer_expire(struct timer_list *unused)
+static void iss_net_user_timer_expire(struct timer_list *unused)
{
}
struct blk_mq_tags **new_tags;
int i;
- if (set->nr_hw_queues >= new_nr_hw_queues) {
- for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
- __blk_mq_free_map_and_rqs(set, i);
+ if (set->nr_hw_queues >= new_nr_hw_queues)
goto done;
- }
new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
{
struct request_queue *q;
LIST_HEAD(head);
- int prev_nr_hw_queues;
+ int prev_nr_hw_queues = set->nr_hw_queues;
+ int i;
lockdep_assert_held(&set->tag_list_lock);
blk_mq_sysfs_unregister_hctxs(q);
}
- prev_nr_hw_queues = set->nr_hw_queues;
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
goto reregister;
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
+
+ /* Free the excess tags when nr_hw_queues shrink. */
+ for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+ __blk_mq_free_map_and_rqs(set, i);
}
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
finish_wait(&rqw->wait, &data.wq);
/*
- * We raced with wbt_wake_function() getting a token,
+ * We raced with rq_qos_wake_function() getting a token,
* which means we now have two. Put our local token
* and wake anyone else potentially waiting for one.
*/
/**
* disk_force_media_change - force a media change event
* @disk: the disk which will raise the event
- * @events: the events to raise
*
* Should be called when the media changes for @disk. Generates a uevent
* and attempts to free all dentries and inodes and invalidates all block
if (!ec)
return -ENOMEM;
- err = __sm2_set_pub_key(ec, key, keylen);
+ err = sm2_ec_ctx_init(ec);
if (err)
goto out_free_ec;
+ err = __sm2_set_pub_key(ec, key, keylen);
+ if (err)
+ goto out_deinit_ec;
+
bits_len = SM2_DEFAULT_USERID_LEN * 8;
entl[0] = bits_len >> 8;
entl[1] = bits_len & 0xff;
# SOC specific infrastructure drivers.
obj-y += soc/
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += genpd/
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += pmdomain/
obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/
}
if (!ret)
- ivpu_info(vdev, "VPU ready message received successfully\n");
+ ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
else
ivpu_hw_diagnose_failure(vdev);
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
+ case PCI_DEVICE_ID_ARL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
memset(start, 0, size);
}
- wmb(); /* Flush WC buffers after writing fw->mem */
+ clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
return 0;
}
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
return;
}
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- wmb(); /* Flush WC buffers after writing bootparams */
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
ivpu_fw_boot_params_print(vdev, boot_params);
}
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
+#define DRM_IVPU_BO_NOSNOOP 0x10000000
+
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
+ if (bo->flags & DRM_IVPU_BO_NOSNOOP)
+ return false;
+
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
+static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev)
+{
+ if (ivpu_is_simics(vdev))
+ return 0;
+
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
+}
+
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
{
int ret;
+ ret = ivpu_wait_for_clock_own_resource_ack(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n");
+ return ret;
+ }
+
ivpu_boot_pwr_island_trickle_drive(vdev, true);
ivpu_boot_pwr_island_drive(vdev, true);
if (status == 0)
return IRQ_NONE;
- REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
-
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
schedule_recovery = true;
}
+ /* This must be done after interrupts are cleared at the source. */
+ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
struct ivpu_device *vdev = ptr;
irqreturn_t ret = IRQ_NONE;
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
if (ret & IRQ_WAKE_THREAD)
return IRQ_WAKE_THREAD;
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
- wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
- (IS_KTHREAD() && kthread_should_stop()) ||
- !list_empty(&cons->rx_msg_list),
- msecs_to_jiffies(timeout_ms));
+ wait_ret = wait_event_timeout(cons->rx_msg_wq,
+ (IS_KTHREAD() && kthread_should_stop()) ||
+ !list_empty(&cons->rx_msg_list),
+ msecs_to_jiffies(timeout_ms));
if (IS_KTHREAD() && kthread_should_stop())
return -EINTR;
if (wait_ret == 0)
return -ETIMEDOUT;
- if (wait_ret < 0)
- return -ERESTARTSYS;
-
spin_lock_irq(&cons->rx_msg_lock);
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
!auto_detect)
acpi_video_bus_register_backlight(video);
- acpi_video_bus_add_notify_handler(video);
+ error = acpi_video_bus_add_notify_handler(video);
+ if (error)
+ goto err_del;
error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
return 0;
err_remove:
+ acpi_video_bus_remove_notify_handler(video);
+err_del:
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
- acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
err_put_video:
acpi_video_bus_put_devices(video);
{
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
+ buf[2] = 0;
/* Twiddle arch-specific bits needed for _PDC */
arch_acpi_set_proc_cap_bits(&buf[2]);
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- struct thermal_trip *trip,
+ const struct thermal_trip *trip,
enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+ if (!(hpriv->cap & HOST_CAP_PART))
+ host->flags |= ATA_HOST_NO_PART;
+
+ if (!(hpriv->cap & HOST_CAP_SSC))
+ host->flags |= ATA_HOST_NO_SSC;
+
+ if (!(hpriv->cap2 & HOST_CAP2_SDS))
+ host->flags |= ATA_HOST_NO_DEVSLP;
+
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
return sprintf(buf, "%d\n", emp->blink_policy);
}
+static void ahci_port_clear_pending_irq(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
+}
+
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
+ ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ ahci_port_clear_pending_irq(ap);
+
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
}
/**
+ * ata_dev_power_set_standby - Set a device power mode to standby
+ * @dev: target device
+ *
+ * Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
+ * For an HDD device, this spins down the disks.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_standby(struct ata_device *dev)
+{
+ unsigned long ap_flags = dev->link->ap->flags;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* Issue STANDBY IMMEDIATE command only if supported by the device */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ /*
+ * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
+ * causing some drives to spin up and down again. For these, do nothing
+ * if we are being called on shutdown.
+ */
+ if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+ system_state == SYSTEM_POWER_OFF)
+ return;
+
+ if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+ system_entering_hibernation())
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_STANDBYNOW1;
+
+ ata_dev_notice(dev, "Entering standby power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
+ * ata_dev_power_set_active - Set a device power mode to active
+ * @dev: target device
+ *
+ * Issue a VERIFY command to ensure that the device is in the
+ * active power mode. For a spun-down HDD (standby or idle power mode),
+ * the VERIFY command will complete after the disk spins up.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+ */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_VERIFY;
+ tf.nsect = 1;
+ if (dev->flags & ATA_DFLAG_LBA) {
+ tf.flags |= ATA_TFLAG_LBA;
+ tf.device |= ATA_LBA;
+ } else {
+ /* CHS */
+ tf.lbal = 0x1; /* sect */
+ }
+
+ ata_dev_notice(dev, "Entering active power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
* ata_read_log_page - read a specific log page
* @dev: target device
* @log: log to read
{
const u16 *id = dev->id;
const char *lba_desc;
- char ncq_desc[24];
+ char ncq_desc[32];
int ret;
dev->flags |= ATA_DFLAG_LBA;
* been aborted by the device due to a limit timeout using the policy
* 0xD. For these commands, invoke EH to get the command sense data.
*/
- if (qc->result_tf.status & ATA_SENSE &&
- ((ata_is_ncq(qc->tf.protocol) &&
- dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id)))) {
+ if (qc->flags & ATA_QCFLAG_HAS_CDL &&
+ qc->result_tf.status & ATA_SENSE) {
/*
* Tell SCSI EH to not overwrite scmd->result even if this
* command is finished with result SAM_STAT_GOOD.
struct ata_link *link;
unsigned long flags;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * A previous PM operation might still be in progress. Wait for
+ * ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ spin_lock_irqsave(ap->lock, flags);
}
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
-
+ /* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
spin_unlock_irqrestore(ap->lock, flags);
- if (!async) {
+ if (!async)
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
}
/*
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again. So cancel any rescan
+ * that is not done yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again. So cancel any rescan
+ * that is not done yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
#endif
const struct device_type ata_port_type = {
- .name = "ata_port",
+ .name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
struct ata_link *link;
struct ata_device *dev;
- /* tell EH we're leaving & flush EH */
+ /* Wait for any ongoing EH */
+ ata_port_wait_eh(ap);
+
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+
+ /* Remove scsi devices */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (dev->sdev) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_remove_device(dev->sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ dev->sdev = NULL;
+ }
+ }
+ }
+
+ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
+
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
+ { .commands = CMDS(ATA_CMD_VERIFY),
+ .timeouts = ata_eh_reset_timeouts },
};
#undef CMDS
struct ata_device *dev;
unsigned long flags;
- /* Restore SControl IPM and SPD for the next driver and
+ /*
+ * Unless we are restarting, transition all enabled devices to
+ * standby power mode.
+ */
+ if (system_state != SYSTEM_RESTART) {
+ ata_for_each_link(link, ap, PMP_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+ }
+
+ /*
+ * Restore SControl IPM and SPD for the next driver and
* disable attached devices.
*/
ata_for_each_link(link, ap, PMP_FIRST) {
ehc->saved_xfer_mode[devno] = dev->xfer_mode;
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
+
+ /* If we are resuming, wake up the device */
+ if (ap->pflags & ATA_PFLAG_RESUMING)
+ ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
}
}
/* clean up */
spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~ATA_PFLAG_RESUMING;
+
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
+ /*
+ * If the device is still enabled, transition it to standby power mode
+ * (i.e. spin down HDDs).
+ */
+ if (ata_dev_enabled(dev))
+ ata_dev_power_set_standby(dev);
+
ata_dev_disable(dev);
spin_lock_irqsave(ap->lock, flags);
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
- char tries_buf[6] = "";
+ char tries_buf[16] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
}
}
- /*
- * Some controllers can't be frozen very well and may set spurious
- * error conditions during reset. Clear accumulated error
- * information and re-thaw the port if frozen. As reset is the
- * final recovery action and we cross check link onlineness against
- * device classification later, no hotplug event is lost by this.
- */
+ /* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ link->eh_info.serror = 0;
if (slave)
- memset(&slave->eh_info, 0, sizeof(link->eh_info));
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
- if (ata_port_is_frozen(ap))
- ata_eh_thaw_port(ap);
-
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some
if (ehc->i.flags & ATA_EHI_DID_RESET)
readid_flags |= ATA_READID_POSTRESET;
+ /*
+ * When resuming, before executing any command, make sure to
+ * transition the device to the active power mode.
+ */
+ if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
+ ata_dev_power_set_active(dev);
+ ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
+ }
+
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
unsigned long flags;
int rc = 0;
struct ata_device *dev;
+ struct ata_link *link;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ /* Set all devices attached to the port in standby mode */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+
/*
* If we have a ZPODD attached, check its zero
* power ready status before the port is frozen.
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+ ap->pflags |= ATA_PFLAG_RESUMING;
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
- if (ata_link_nr_enabled(link) > 0)
- /* no restrictions on LPM transitions */
+ if (ata_link_nr_enabled(link) > 0) {
+ /* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
- else {
+
+ /*
+ * If the controller does not support partial, slumber,
+ * or devsleep, then disallow these transitions.
+ */
+ if (link->ap->host->flags & ATA_HOST_NO_PART)
+ scontrol |= (0x1 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_SSC)
+ scontrol |= (0x2 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
+ scontrol |= (0x4 << 8);
+ } else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
}
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
+
/*
- * Stop the drive on suspend but do not issue START STOP UNIT
- * on resume as this is not necessary and may fail: the device
- * will be woken up by ata_port_pm_resume() with a port reset
- * and device revalidation.
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume only. For system level suspend/resume, the device
+ * power state is handled directly by libata EH.
*/
- sdev->manage_start_stop = 1;
- sdev->no_start_on_resume = 1;
+ sdev->manage_runtime_start_stop = true;
}
/*
}
/**
+ * ata_scsi_slave_alloc - Early setup of SCSI device
+ * @sdev: SCSI device to examine
+ *
+ * This is called from scsi_alloc_sdev() when the scsi device
+ * associated with an ATA device is scanned on a port.
+ *
+ * LOCKING:
+ * Defined by SCSI layer. We don't really care.
+ */
+
+int ata_scsi_slave_alloc(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct device_link *link;
+
+ ata_scsi_sdev_config(sdev);
+
+ /*
+ * Create a link from the ata_port device to the scsi device to ensure
+ * that PM does suspend/resume in the correct order: the scsi device is
+ * consumer (child) and the ata port the supplier (parent).
+ */
+ link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ ata_port_err(ap, "Failed to create link to scsi device %s\n",
+ dev_name(&sdev->sdev_gendev));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+
+/**
* ata_scsi_slave_config - Set SCSI device attributes
* @sdev: SCSI device to examine
*
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
- int rc = 0;
-
- ata_scsi_sdev_config(sdev);
if (dev)
- rc = ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, dev);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
unsigned long flags;
struct ata_device *dev;
+ device_link_remove(&sdev->sdev_gendev, &ap->tdev);
+
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
if (dev && dev->sdev) {
}
if (cdb[4] & 0x1) {
- tf->nsect = 1; /* 1 sector, lba=0 */
+ tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
tf->lbah = 0x0; /* cyl high */
}
- tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
+ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
- system_entering_hibernation())
+ system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
+ if (args->dev->flags & ATA_DFLAG_CDL)
+ hdr[2] = 0xd; /* claim SPC-6 version compatibility */
+
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
break;
case MAINTENANCE_IN:
- if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- bool delay_rescan = false;
+ int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev = dev->sdev;
+ /*
+ * If the port was suspended before this was scheduled,
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+ goto unlock;
+
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
- /*
- * If the rescan work was scheduled because of a resume
- * event, the port is already fully resumed, but the
- * SCSI device may not yet be fully resumed. In such
- * case, executing scsi_rescan_device() may cause a
- * deadlock with the PM code on device_lock(). Prevent
- * this by giving up and retrying rescan after a short
- * delay.
- */
- delay_rescan = sdev->sdev_gendev.power.is_suspended;
- if (delay_rescan) {
- scsi_device_put(sdev);
- break;
- }
-
spin_unlock_irqrestore(ap->lock, flags);
- scsi_rescan_device(sdev);
+ ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+ goto unlock;
}
}
+unlock:
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
- if (delay_rescan)
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
+ if (ret)
schedule_delayed_work(&ap->scsi_rescan_task,
msecs_to_jiffies(5));
}
put_device(dev);
}
+static const struct device_type ata_port_sas_type = {
+ .name = ATA_PORT_TYPE_NAME,
+};
+
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
struct device *dev = &ap->tdev;
device_initialize(dev);
- dev->type = &ata_port_type;
+ if (ap->flags & ATA_FLAG_SAS_HOST)
+ dev->type = &ata_port_sas_type;
+ else
+ dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);
ATA_DNXFER_QUIET = (1 << 31),
};
+#define ATA_PORT_TYPE_NAME "ata_port"
+
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags);
extern int ata_dev_configure(struct ata_device *dev);
+extern void ata_dev_power_set_standby(struct ata_device *dev);
+extern void ata_dev_power_set_active(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
{
int l, h, r;
- r = regr + cont_map[cont];
+ r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
}
static void comm_disconnect(struct pi_adapter *pi)
-
{
w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
- }
+ }
}
static void comm_log_adapter(struct pi_adapter *pi)
-
-{ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
+{
+ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
- o += snprintf(linebuf + o, sizeof(linebuf) - o,
- "%08x ", readl(start + b));
+ o += scnprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",
/* subsystems can specify simple device enumeration */
else if (dev->bus && dev->bus->dev_name)
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+ else
+ error = -EINVAL;
if (error)
goto name_error;
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
+static void rbd_image_header_cleanup(struct rbd_image_header *header)
+{
+ kfree(header->object_prefix);
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+
+ memset(header, 0, sizeof(*header));
+}
+
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
- struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+ struct rbd_image_header_ondisk *ondisk,
+ bool first_time)
{
- struct rbd_image_header *header = &rbd_dev->header;
- bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
- rbd_init_layout(rbd_dev);
- } else {
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_names);
- kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
- ret = rbd_header_from_disk(rbd_dev, ondisk);
+ ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
}
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
- u64 mapping_size;
- int ret;
-
- down_write(&rbd_dev->header_rwsem);
- mapping_size = rbd_dev->mapping.size;
-
- ret = rbd_dev_header_info(rbd_dev);
- if (ret)
- goto out;
-
- /*
- * If there is a parent, see if it has disappeared due to the
- * mapped image getting flattened.
- */
- if (rbd_dev->parent) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- goto out;
- }
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- rbd_dev->mapping.size = rbd_dev->header.image_size;
-
-out:
- up_write(&rbd_dev->header_rwsem);
- if (!ret && mapping_size != rbd_dev->mapping.size)
- rbd_dev_update_size(rbd_dev);
-
- return ret;
-}
-
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
return 0;
}
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.obj_order,
- &rbd_dev->header.image_size);
-}
-
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+ char **pobject_prefix)
{
size_t size;
void *reply_buf;
+ char *object_prefix;
int ret;
void *p;
goto out;
p = reply_buf;
- rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + ret, NULL, GFP_NOIO);
+ object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+ GFP_NOIO);
+ if (IS_ERR(object_prefix)) {
+ ret = PTR_ERR(object_prefix);
+ goto out;
+ }
ret = 0;
- if (IS_ERR(rbd_dev->header.object_prefix)) {
- ret = PTR_ERR(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- } else {
- dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
- }
+ *pobject_prefix = object_prefix;
+ dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
return 0;
}
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
- rbd_is_ro(rbd_dev),
- &rbd_dev->header.features);
-}
-
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
u64 overlap;
};
+static void rbd_parent_info_cleanup(struct parent_image_info *pii)
+{
+ kfree(pii->pool_ns);
+ kfree(pii->image_id);
+
+ memset(pii, 0, sizeof(*pii));
+}
+
/*
* The caller is responsible for @pii.
*/
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
-static int get_parent_info(struct rbd_device *rbd_dev,
- struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
return ret;
}
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
if (!parent_spec)
return -ENOMEM;
- ret = get_parent_info(rbd_dev, &pii);
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
- dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
- __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
- pii.has_overlap, pii.overlap);
-
- if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
- /*
- * Either the parent never existed, or we have
- * record of it but the image got flattened so it no
- * longer has a parent. When the parent of a
- * layered image disappears we immediately set the
- * overlap to 0. The effect of this is that all new
- * requests will be treated as if the image had no
- * parent.
- *
- * If !pii.has_overlap, the parent image spec is not
- * applicable. It's there to avoid duplication in each
- * snapshot record.
- */
- if (rbd_dev->parent_overlap) {
- rbd_dev->parent_overlap = 0;
- rbd_dev_parent_put(rbd_dev);
- pr_info("%s: clone image has been flattened\n",
- rbd_dev->disk->disk_name);
- }
-
+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
- }
/* The ceph file layout needs to fit pool id in 32 bits */
}
/*
- * The parent won't change (except when the clone is
- * flattened, already handled that). So we only need to
- * record the parent spec we have not already done so.
+ * The parent won't change except when the clone is flattened,
+ * so we only need to record the parent image spec once.
*/
- if (!rbd_dev->parent_spec) {
- parent_spec->pool_id = pii.pool_id;
- if (pii.pool_ns && *pii.pool_ns) {
- parent_spec->pool_ns = pii.pool_ns;
- pii.pool_ns = NULL;
- }
- parent_spec->image_id = pii.image_id;
- pii.image_id = NULL;
- parent_spec->snap_id = pii.snap_id;
-
- rbd_dev->parent_spec = parent_spec;
- parent_spec = NULL; /* rbd_dev now owns this */
+ parent_spec->pool_id = pii.pool_id;
+ if (pii.pool_ns && *pii.pool_ns) {
+ parent_spec->pool_ns = pii.pool_ns;
+ pii.pool_ns = NULL;
}
+ parent_spec->image_id = pii.image_id;
+ pii.image_id = NULL;
+ parent_spec->snap_id = pii.snap_id;
+
+ rbd_assert(!rbd_dev->parent_spec);
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
/*
- * We always update the parent overlap. If it's zero we issue
- * a warning, as we will proceed as if there was no parent.
+ * Record the parent overlap. If it's zero, issue a warning as
+ * we will proceed as if there is no parent.
*/
- if (!pii.overlap) {
- if (parent_spec) {
- /* refresh, careful to warn just once */
- if (rbd_dev->parent_overlap)
- rbd_warn(rbd_dev,
- "clone now standalone (overlap became 0)");
- } else {
- /* initial probe */
- rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
- }
- }
+ if (!pii.overlap)
+ rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
- kfree(pii.pool_ns);
- kfree(pii.image_id);
+ rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+ u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
- void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
if (ret < size)
return -ERANGE;
- p = &striping_info_buf;
- rbd_dev->header.stripe_unit = ceph_decode_64(&p);
- rbd_dev->header.stripe_count = ceph_decode_64(&p);
+ *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+ *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+ dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+ *stripe_count);
+
return 0;
}
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
- __le64 data_pool_id;
+ __le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
- NULL, 0, &data_pool_id, sizeof(data_pool_id));
+ NULL, 0, &data_pool_buf,
+ sizeof(data_pool_buf));
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
- if (ret < sizeof(data_pool_id))
+ if (ret < sizeof(data_pool_buf))
return -EBADMSG;
- rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
- WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+ *data_pool_id = le64_to_cpu(data_pool_buf);
+ dout(" data_pool_id = %lld\n", *data_pool_id);
+ WARN_ON(*data_pool_id == CEPH_NOPOOL);
+
return 0;
}
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+ struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
- ceph_put_snap_context(rbd_dev->header.snapc);
- rbd_dev->header.snapc = snapc;
-
+ *psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
return snap_name;
}
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
- bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
- ret = rbd_dev_v2_image_size(rbd_dev);
+ ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+ first_time ? &header->obj_order : NULL,
+ &header->image_size);
if (ret)
return ret;
if (first_time) {
- ret = rbd_dev_v2_header_onetime(rbd_dev);
+ ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
- ret = rbd_dev_v2_snap_context(rbd_dev);
- if (ret && first_time) {
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- }
+ ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
- return rbd_dev_v1_header_info(rbd_dev);
+ return rbd_dev_v1_header_info(rbd_dev, header, first_time);
- return rbd_dev_v2_header_info(rbd_dev);
+ return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- struct rbd_image_header *header;
-
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
- header = &rbd_dev->header;
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_sizes);
- kfree(header->snap_names);
- kfree(header->object_prefix);
- memset(header, 0, sizeof (*header));
+ rbd_image_header_cleanup(&rbd_dev->header);
}
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
{
int ret;
- ret = rbd_dev_v2_object_prefix(rbd_dev);
+ ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
- goto out_err;
+ return ret;
/*
* Get and check the features for the image. Currently the
* features are assumed to never change.
*/
- ret = rbd_dev_v2_features(rbd_dev);
+ ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+ rbd_is_ro(rbd_dev), &header->features);
if (ret)
- goto out_err;
+ return ret;
/* If the image supports fancy striping, get its parameters */
- if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
- ret = rbd_dev_v2_striping_info(rbd_dev);
- if (ret < 0)
- goto out_err;
+ if (header->features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+ &header->stripe_count);
+ if (ret)
+ return ret;
}
- if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
- ret = rbd_dev_v2_data_pool(rbd_dev);
+ if (header->features & RBD_FEATURE_DATA_POOL) {
+ ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
- goto out_err;
+ return ret;
}
- rbd_init_layout(rbd_dev);
return 0;
-
-out_err:
- rbd_dev->header.features = 0;
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- return ret;
}
/*
if (!depth)
down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_header_info(rbd_dev);
+ ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
+ rbd_init_layout(rbd_dev);
+
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
+ ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
return ret;
}
+static void rbd_dev_update_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
+
+ if (rbd_dev->header.image_size != header->image_size) {
+ rbd_dev->header.image_size = header->image_size;
+
+ if (!rbd_is_snap(rbd_dev)) {
+ rbd_dev->mapping.size = header->image_size;
+ rbd_dev_update_size(rbd_dev);
+ }
+ }
+
+ ceph_put_snap_context(rbd_dev->header.snapc);
+ rbd_dev->header.snapc = header->snapc;
+ header->snapc = NULL;
+
+ if (rbd_dev->image_format == 1) {
+ kfree(rbd_dev->header.snap_names);
+ rbd_dev->header.snap_names = header->snap_names;
+ header->snap_names = NULL;
+
+ kfree(rbd_dev->header.snap_sizes);
+ rbd_dev->header.snap_sizes = header->snap_sizes;
+ header->snap_sizes = NULL;
+ }
+}
+
+static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
+{
+ if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
+ /*
+ * Either the parent never existed, or we have
+ * record of it but the image got flattened so it no
+ * longer has a parent. When the parent of a
+ * layered image disappears we immediately set the
+ * overlap to 0. The effect of this is that all new
+ * requests will be treated as if the image had no
+ * parent.
+ *
+ * If !pii.has_overlap, the parent image spec is not
+ * applicable. It's there to avoid duplication in each
+ * snapshot record.
+ */
+ if (rbd_dev->parent_overlap) {
+ rbd_dev->parent_overlap = 0;
+ rbd_dev_parent_put(rbd_dev);
+ pr_info("%s: clone has been flattened\n",
+ rbd_dev->disk->disk_name);
+ }
+ } else {
+ rbd_assert(rbd_dev->parent_spec);
+
+ /*
+ * Update the parent overlap. If it became zero, issue
+ * a warning as we will proceed as if there is no parent.
+ */
+ if (!pii->overlap && rbd_dev->parent_overlap)
+ rbd_warn(rbd_dev,
+ "clone has become standalone (overlap 0)");
+ rbd_dev->parent_overlap = pii->overlap;
+ }
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+{
+ struct rbd_image_header header = { 0 };
+ struct parent_image_info pii = { 0 };
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = rbd_dev_header_info(rbd_dev, &header, false);
+ if (ret)
+ goto out;
+
+ /*
+ * If there is a parent, see if it has disappeared due to the
+ * mapped image getting flattened.
+ */
+ if (rbd_dev->parent) {
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ if (ret)
+ goto out;
+ }
+
+ down_write(&rbd_dev->header_rwsem);
+ rbd_dev_update_header(rbd_dev, &header);
+ if (rbd_dev->parent)
+ rbd_dev_update_parent(rbd_dev, &pii);
+ up_write(&rbd_dev->header_rwsem);
+
+out:
+ rbd_parent_info_cleanup(&pii);
+ rbd_image_header_cleanup(&header);
+ return ret;
+}
+
static ssize_t do_rbd_add(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
SOC_2420,
SOC_2430,
SOC_3430,
+ SOC_AM35,
SOC_3630,
SOC_4430,
SOC_4460,
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
}
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
__func__, val, irq_mask);
- if (sysc_soc->soc == SOC_3430) {
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
/* Clear DSS_SDI_CONTROL */
sysc_write(ddata, 0x44, 0);
}
if (ddata->cfg.srst_udelay)
- usleep_range(ddata->cfg.srst_udelay,
- ddata->cfg.srst_udelay * 2);
+ fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
SOC_FLAG("OMAP443*", SOC_4430),
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430)
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
error = -ENXIO;
else
error = -EBUSY;
static int __init
parisc_agp_init(void)
{
- extern struct sba_device *sba_list;
-
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
.shutdown_pre = tpm_class_shutdown,
};
const struct class tpmrm_class = {
- .name = "tmprm",
+ .name = "tpmrm",
};
dev_t tpm_devt;
unsigned int val)
{
struct i2c_client *i2c = context;
- const u8 data[3] = { reg, 1, val };
+ const u8 data[2] = { reg, val };
const int count = ARRAY_SIZE(data);
int ret;
static const struct regmap_config si521xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_FLAT,
.max_register = SI521XX_REG_DA,
.rd_table = &si521xx_readable_table,
.wr_table = &si521xx_writeable_table,
{
const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
const struct clk_parent_data clk_parent_data = { .index = 0 };
- struct si521xx *si;
+ const u8 data[3] = { SI521XX_REG_BC, 1, 1 };
unsigned char name[6] = "DIFF0";
struct clk_init_data init = {};
+ struct si521xx *si;
int i, ret;
if (!chip_info)
"Failed to allocate register map\n");
/* Always read back 1 Byte via I2C */
- ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
+ ret = i2c_master_send(client, data, ARRAY_SIZE(data));
if (ret < 0)
return ret;
VC3_DIV5,
};
-enum vc3_clk_mux {
- VC3_DIFF2_MUX,
- VC3_DIFF1_MUX,
- VC3_SE3_MUX,
- VC3_SE2_MUX,
- VC3_SE1_MUX,
-};
-
enum vc3_clk {
- VC3_DIFF2,
- VC3_DIFF1,
- VC3_SE3,
- VC3_SE2,
- VC3_SE1,
VC3_REF,
+ VC3_SE1,
+ VC3_SE2,
+ VC3_SE3,
+ VC3_DIFF1,
+ VC3_DIFF2,
+};
+
+enum vc3_clk_mux {
+ VC3_SE1_MUX = VC3_SE1 - 1,
+ VC3_SE2_MUX = VC3_SE2 - 1,
+ VC3_SE3_MUX = VC3_SE3 - 1,
+ VC3_DIFF1_MUX = VC3_DIFF1 - 1,
+ VC3_DIFF2_MUX = VC3_DIFF2 - 1,
};
struct vc3_clk_data {
/* Determine best fractional part, which is 16 bit wide */
div_frc = rate % *parent_rate;
div_frc *= BIT(16) - 1;
- do_div(div_frc, *parent_rate);
- vc3->div_frc = (u32)div_frc;
+ vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16);
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
rate = *parent_rate * vc3->div_int;
}
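The clamp above protects the 16-bit fractional field of the PLL divider. Below is a rough standalone sketch of the same 16.16 fixed-point arithmetic in user-space C; the function name and the sample rates are made up for illustration and are not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	/* Integer part, then the remainder scaled by (2^16 - 1) and clamped
	 * to the 16-bit register field, mirroring the code above. */
	static uint64_t vc3_example_round_rate(uint64_t rate, uint64_t parent,
					       uint32_t *div_int, uint32_t *div_frc)
	{
		uint64_t frc;

		*div_int = rate / parent;
		frc = (rate % parent) * 0xffff / parent;
		*div_frc = frc > 0xffff ? 0xffff : (uint32_t)frc;

		/* Rate actually achievable with the programmed divider */
		return parent * ((uint64_t)*div_int * 65536 + *div_frc) / 65536;
	}

	int main(void)
	{
		uint32_t di, df;
		uint64_t r = vc3_example_round_rate(61440000, 25000000, &di, &df);

		printf("div_int=%u div_frc=%u rate=%llu\n",
		       di, df, (unsigned long long)r);
		return 0;
	}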
};
static struct vc3_hw_data clk_mux[] = {
- [VC3_DIFF2_MUX] = {
+ [VC3_SE1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF2_CTRL_REG,
- .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
+ .offs = VC3_SE1_DIV4_CTRL,
+ .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff2_mux",
+ .name = "se1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_DIFF1_MUX] = {
+ [VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF1_CTRL_REG,
- .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
+ .offs = VC3_SE2_CTRL_REG0,
+ .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff1_mux",
+ .name = "se2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE2_MUX] = {
+ [VC3_DIFF1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
+ .offs = VC3_DIFF1_CTRL_REG,
+ .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se2_mux",
+ .name = "diff1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE1_MUX] = {
+ [VC3_DIFF2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE1_DIV4_CTRL,
- .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
+ .offs = VC3_DIFF2_CTRL_REG,
+ .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se1_mux",
+ .name = "diff2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
name, 0, CLK_SET_RATE_PARENT, 1, 1);
else
clk_out[i] = devm_clk_hw_register_fixed_factor_parent_hw(dev,
- name, &clk_mux[i].hw, CLK_SET_RATE_PARENT, 1, 1);
+ name, &clk_mux[i - 1].hw, CLK_SET_RATE_PARENT, 1, 1);
if (IS_ERR(clk_out[i]))
return PTR_ERR(clk_out[i]);
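A short sketch of the index relationship that the clk_mux[i - 1] lookup above relies on, as implied by the reordered enums (VC3_REF is output 0 and is fed directly from the input, so every other output i uses mux i - 1). The helper below is hypothetical and only illustrates the mapping:

	static struct clk_hw *example_mux_for_output(enum vc3_clk out)
	{
		/* VC3_REF comes straight from the input clock and has no mux */
		if (out == VC3_REF)
			return NULL;

		/* VC3_SE1 -> VC3_SE1_MUX, ..., VC3_DIFF2 -> VC3_DIFF2_MUX */
		return &clk_mux[out - 1].hw;
	}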
0x250, 0, 3, UMS512_MUX_FLAG);
static const struct clk_parent_data thm_parents[] = {
- { .fw_name = "ext-32m" },
+ { .fw_name = "ext-32k" },
{ .hw = &clk_250k.hw },
};
static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
- return err;
+ return 0;
return response.rate;
}
config COMEDI_PARPORT
tristate "Parallel port support"
- depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- depends on ISA
help
Enable comedi ISA and PC/104 drivers to be built
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-814 and PCL-816 ISA cards
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-A2150 cards
config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-AO-6/10 cards
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
- depends on PCI && HAS_IOPORT
+ depends on PCI
help
Enable support for comedi PCI drivers.
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI9111 cards
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Advantech PCI DIO cards
config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards
config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.
config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC PCI-1200.
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA
- depends on HAS_IOPORT
select COMEDI_NI_TIOCMD
select COMEDI_8255
help
config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Real Time Devices PCI4520/DM7520
config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
- depends on HAS_IOPORT
help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO
config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for the National Instruments PCMCIA DAQCard-1200
config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_NI_TIO
select COMEDI_8255
help
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
- depends on HAS_IOPORT
help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
config COMEDI_8254
tristate
- depends on HAS_IOPORT
config COMEDI_8255
tristate
config COMEDI_8255_SA
tristate "Standalone 8255 support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
called kcomedilib.
config COMEDI_AMPLC_DIO200
- depends on COMEDI_8254
+ select COMEDI_8254
tristate
config COMEDI_AMPLC_PC236
config COMEDI_DAS08
tristate
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
config COMEDI_ISADMA
config COMEDI_NI_LABPC
tristate
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA
struct cxl_cxims_data {
int nr_maps;
- u64 xormaps[];
+ u64 xormaps[] __counted_by(nr_maps);
};
/*
GFP_KERNEL);
if (!cximsd)
return -ENOMEM;
+ cximsd->nr_maps = nr_maps;
memcpy(cximsd->xormaps, cxims->xormap_list,
nr_maps * sizeof(*cximsd->xormaps));
- cximsd->nr_maps = nr_maps;
cxlrd->platform_data = cximsd;
return 0;
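The swap above matters because of __counted_by(): the counter member must hold the real element count before the flexible array is touched, otherwise the compiler's runtime bounds check sees a size of zero. A minimal sketch of the pattern in kernel context follows; the struct and helper names are illustrative, not the CXL driver's.

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct example_maps {
		int nr_maps;
		u64 xormaps[] __counted_by(nr_maps);
	};

	static struct example_maps *example_maps_alloc(const u64 *src, int n)
	{
		struct example_maps *m;

		m = kzalloc(struct_size(m, xormaps, n), GFP_KERNEL);
		if (!m)
			return NULL;

		m->nr_maps = n;	/* set the counter first ... */
		memcpy(m->xormaps, src, n * sizeof(*m->xormaps)); /* ... then fill */
		return m;
	}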
for (i = 0; i < cel_entries; i++) {
u16 opcode = le16_to_cpu(cel_entry[i].opcode);
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+ int enabled = 0;
- if (!cmd && (!cxl_is_poison_command(opcode) ||
- !cxl_is_security_command(opcode))) {
- dev_dbg(dev,
- "Opcode 0x%04x unsupported by driver\n", opcode);
- continue;
- }
-
- if (cmd)
+ if (cmd) {
set_bit(cmd->info.id, mds->enabled_cmds);
+ enabled++;
+ }
- if (cxl_is_poison_command(opcode))
+ if (cxl_is_poison_command(opcode)) {
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
+ enabled++;
+ }
- if (cxl_is_security_command(opcode))
+ if (cxl_is_security_command(opcode)) {
cxl_set_security_cmd_enabled(&mds->security, opcode);
+ enabled++;
+ }
- dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
+ dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
+ enabled ? "enabled" : "unsupported by driver");
}
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
return cxl_setup_regs(map);
}
-static inline int cxl_port_setup_regs(struct cxl_port *port,
- resource_size_t component_reg_phys)
+static int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(port->uport_dev))
+ return 0;
return cxl_setup_comp_regs(&port->dev, &port->comp_map,
component_reg_phys);
}
-static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
- resource_size_t component_reg_phys)
+static int cxl_dport_setup_regs(struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(dport->dport_dev))
+ return 0;
return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
component_reg_phys);
}
return 0;
}
+static int match_auto_decoder(struct device *dev, void *data)
+{
+ struct cxl_region_params *p = data;
+ struct cxl_decoder *cxld;
+ struct range *r;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ r = &cxld->hpa_range;
+
+ if (p->res && p->res->start == r->start && p->res->end == r->end)
+ return 1;
+
+ return 0;
+}
+
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
+ dev = device_find_child(&port->dev, &cxlr->params,
+ match_auto_decoder);
+ else
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
if (!dev)
return NULL;
/*
}
/*
- * If @parent_port is masking address bits, pick the next unused address
- * bit to route @port's targets.
+ * Interleave granularity is a multiple of @parent_port granularity.
+ * Multiplier is the parent port interleave ways.
*/
- if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
- u32 address_bit = max(peig + peiw, eiw + peig);
-
- eig = address_bit - eiw + 1;
- } else {
- eiw = peiw;
- eig = peig;
+ rc = granularity_to_eig(parent_ig * parent_iw, &eig);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: invalid granularity calculation (%d * %d)\n",
+ dev_name(&parent_port->dev), parent_ig, parent_iw);
+ return rc;
}
rc = eig_to_granularity(eig, &ig);
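A worked example of the replacement calculation, using illustrative numbers that are not taken from the patch: if the parent port interleaves parent_iw = 2 ways at parent_ig = 256 bytes, each of its targets only owns every other 256-byte chunk, so the granularity this port should request is 256 * 2 = 512 bytes. granularity_to_eig(512, &eig) encodes that value, and eig_to_granularity() on the next line converts the encoded value back to a byte granularity for the decoder programming.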
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
- struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
void __iomem *addr;
u32 orig_val, val, mask;
return 0;
}
- /* BIOS has CXL error control */
- if (!host_bridge->native_cxl_error)
- return -ENXIO;
+ /* BIOS has PCIe AER error control */
+ if (!pcie_aer_is_native(pdev))
+ return 0;
rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
if (rc)
* without actually having a link.
*/
create:
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
{
struct fw_node *node;
- node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
+ node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
if (node == NULL)
return NULL;
*
* - power condition
* Set the power condition field in the START STOP UNIT commands sent by
- * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
+ * manage_runtime_start_stop is on).
* Some disks need this to spin down or to resume properly.
*
* - override internal blacklist
sdev->use_10_for_rw = 1;
- if (sbp2_param_exclusive_login)
- sdev->manage_start_stop = 1;
+ if (sbp2_param_exclusive_login) {
+ sdev->manage_system_start_stop = true;
+ sdev->manage_runtime_start_stop = true;
+ }
if (sdev->type == TYPE_ROM)
sdev->use_10_for_ms = 1;
return num_pages;
}
+static u8 ffa_memory_attributes_get(u32 func_id)
+{
+ /*
+ * For the memory lend or donate operation, if the receiver is a PE or
+ * a proxy endpoint, the owner/sender must not specify the attributes
+ */
+ if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
+ func_id == FFA_MEM_LEND)
+ return 0;
+
+ return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
mem_region->tag = args->tag;
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
- mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
- FFA_MEM_INNER_SHAREABLE;
+ mem_region->attributes = ffa_memory_attributes_get(func_id);
ep_mem_access = &mem_region->ep_mem_access[0];
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
if (!pinfo)
return -ENOMEM;
+ pinfo->version = version;
+
ret = scmi_perf_attributes_get(ph, pinfo);
if (ret)
return ret;
if (ret)
return ret;
- pinfo->version = version;
-
return ph->set_priv(ph, pinfo);
}
return PTR_ERR(adsp2_alg);
for (i = 0; i < n_algs; i++) {
- cs_dsp_info(dsp,
- "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
- i, be32_to_cpu(adsp2_alg[i].alg.id),
- (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
- (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
- be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
- be32_to_cpu(adsp2_alg[i].xm),
- be32_to_cpu(adsp2_alg[i].ym),
- be32_to_cpu(adsp2_alg[i].zm));
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp2_alg[i].alg.id),
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp2_alg[i].xm),
+ be32_to_cpu(adsp2_alg[i].ym),
+ be32_to_cpu(adsp2_alg[i].zm));
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_alg[i].alg.id,
return PTR_ERR(halo_alg);
for (i = 0; i < n_algs; i++) {
- cs_dsp_info(dsp,
- "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
- i, be32_to_cpu(halo_alg[i].alg.id),
- (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
- (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
- be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
- be32_to_cpu(halo_alg[i].xm_base),
- be32_to_cpu(halo_alg[i].ym_base));
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+ i, be32_to_cpu(halo_alg[i].alg.id),
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(halo_alg[i].xm_base),
+ be32_to_cpu(halo_alg[i].ym_base));
ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
halo_alg[i].alg.ver,
return 0;
}
+/**
+ * reserve_unaccepted - Map and reserve unaccepted configuration table
+ * @unaccepted: Pointer to unaccepted memory table
+ *
+ * memblock_add() makes sure that the table is mapped in the direct mapping.
+ * During a normal boot this happens automatically because the table is
+ * allocated from usable memory. But during a crashkernel boot only memory
+ * specifically reserved for the crash scenario is mapped, so memblock_add()
+ * is needed to force the table into the direct mapping in that case.
+ *
+ * Align the range to the nearest page borders. Ranges smaller than page size
+ * are not going to be mapped.
+ *
+ * memblock_reserve() makes sure that future allocations will not touch the
+ * table.
+ */
+
+static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
+{
+ phys_addr_t start, size;
+
+ start = PAGE_ALIGN_DOWN(efi.unaccepted);
+ size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
+
+ memblock_add(start, size);
+ memblock_reserve(start, size);
+}
+
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables)
unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
if (unaccepted) {
- unsigned long size;
if (unaccepted->version == 1) {
- size = sizeof(*unaccepted) + unaccepted->size;
- memblock_reserve(efi.unaccepted, size);
+ reserve_unaccepted(unaccepted);
} else {
efi.unaccepted = EFI_INVALID_TABLE_ADDR;
}
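As a rough illustration of the rounding done by reserve_unaccepted(), assuming 4 KiB pages; the address and payload size in this hypothetical helper are made up:

	static void example_reserve(phys_addr_t table, size_t payload)
	{
		phys_addr_t start = PAGE_ALIGN_DOWN(table);	/* e.g. 0x12345678 -> 0x12345000 */
		phys_addr_t size  = PAGE_ALIGN(payload);	/* whole pages only */

		memblock_add(start, size);	/* make sure the range is in the direct mapping */
		memblock_reserve(start, size);	/* keep future allocations off of it */
	}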
bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*unaccepted_table) + bitmap_size,
(void **)&unaccepted_table);
if (status != EFI_SUCCESS) {
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
+ kfree(dsp_chan->name);
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
+ pmic_eic->chip.can_sleep = true;
irq = &pmic_eic->chip.irq;
gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
return sprintf(page, "%c\n", live ? '1' : '0');
}
-static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
- unsigned int *line_names_size)
+static unsigned int gpio_sim_get_line_names_size(struct gpio_sim_bank *bank)
{
- unsigned int max_offset = 0;
- bool has_line_names = false;
struct gpio_sim_line *line;
- char **line_names;
+ unsigned int size = 0;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name) {
- if (line->offset > max_offset)
- max_offset = line->offset;
-
- /*
- * max_offset can stay at 0 so it's not an indicator
- * of whether line names were configured at all.
- */
- has_line_names = true;
- }
+ size = max(size, line->offset + 1);
}
- if (!has_line_names)
- /*
- * This is not an error - NULL means, there are no line
- * names configured.
- */
- return NULL;
-
- *line_names_size = max_offset + 1;
+ return size;
+}
- line_names = kcalloc(*line_names_size, sizeof(*line_names), GFP_KERNEL);
- if (!line_names)
- return ERR_PTR(-ENOMEM);
+static void
+gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
+{
+ struct gpio_sim_line *line;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name && (line->offset <= max_offset))
- line_names[line->offset] = line->name;
+ line_names[line->offset] = line->name;
}
-
- return line_names;
}
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
struct fwnode_handle *parent)
{
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size = 0;
+ unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
- line_names = gpio_sim_make_line_names(bank, &line_names_size);
- if (IS_ERR(line_names))
- return ERR_CAST(line_names);
+ line_names_size = gpio_sim_get_line_names_size(bank);
+ if (line_names_size) {
+ line_names = kcalloc(line_names_size, sizeof(*line_names),
+ GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_line_names(bank, line_names);
- if (line_names)
properties[prop_idx++] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
"gpio-line-names",
line_names, line_names_size);
+ }
return fwnode_create_software_node(properties, parent);
}
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- return ret;
+ goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
}
return 0;
+
+err_remove_domain:
+ irq_domain_remove(tb10x_gpio->domain);
+ return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
unsigned offset, bool enabled)
{
struct timbgpio *tgpio = gpiochip_get_data(gpio);
+ unsigned long flags;
u32 reg;
- spin_lock(&tgpio->lock);
+ spin_lock_irqsave(&tgpio->lock, flags);
reg = ioread32(tgpio->membase + offset);
if (enabled)
reg &= ~(1 << index);
iowrite32(reg, tgpio->membase + offset);
- spin_unlock(&tgpio->lock);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
return 0;
}
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default y
+ default FB
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the linux console
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
cu_info->cu_active_number = acu_info.number;
cu_info->cu_ao_mask = acu_info.ao_cu_mask;
memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
- sizeof(acu_info.bitmap));
+ sizeof(cu_info->cu_bitmap));
cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst)
+ uint32_t *reg_data)
{
*reg_data = wait_times;
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst)
+ uint32_t *reg_data)
{
*reg_data = wait_times;
SCH_WAVE,
grace_period);
- *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
- mmCP_IQ_WAIT_TIME2);
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
}
/*
- * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
- * Disable S/G on such systems until we have a proper fix.
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
- */
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
-{
- switch (amdgpu_sg_display) {
- case -1:
- break;
- case 0:
- return false;
- case 1:
- return true;
- default:
- return false;
- }
- if ((totalram_pages() << (PAGE_SHIFT - 10)) +
- (adev->gmc.real_vram_size / 1024) >= 64000000) {
- DRM_WARN("Disabling S/G due to >=64GB RAM\n");
- return false;
- }
- return true;
-}
-
-/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
#define AMDGPU_MAX_GC_INSTANCES 8
+#define KGD_MAX_QUEUES 128
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
uint32_t number;
uint32_t ao_cu_mask;
uint32_t ao_cu_bitmap[4][4];
- uint32_t bitmap[4][4];
+ uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
struct amdgpu_gfx_ras {
memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
- sizeof(adev->gfx.cu_info.bitmap));
+ sizeof(dev_info->cu_bitmap));
dev_info->vram_type = adev->gmc.vram_type;
dev_info->vram_bit_width = adev->gmc.vram_width;
dev_info->vce_harvest_config = adev->vce.harvest_config;
struct atom_context *atom_context;
atom_context = adev->mode_info.atom_context;
- memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
- memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
- vbios_info.version = atom_context->version;
- memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
- sizeof(atom_context->vbios_ver_str));
- memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+ if (atom_context) {
+ memcpy(vbios_info.name, atom_context->name,
+ sizeof(atom_context->name));
+ memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
+ sizeof(atom_context->vbios_pn));
+ vbios_info.version = atom_context->version;
+ memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+ sizeof(atom_context->vbios_ver_str));
+ memcpy(vbios_info.date, atom_context->date,
+ sizeof(atom_context->date));
+ }
return copy_to_user(out, &vbios_info,
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
+ kfree(info);
return ret;
}
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- if (adev->smuio.funcs &&
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
}
}
if (err_data.ue_count) {
- if (adev->smuio.funcs &&
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
unsigned int size)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
- GFP_KERNEL, true, 0);
+ GFP_KERNEL, false, 0);
if (IS_ERR(sa)) {
*sa_bo = NULL;
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
* SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
* SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
*/
- cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
+ cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v6_0_get_cu_enabled(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
gfx_v7_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
- if (cu_info->bitmap[i][j] & mask) {
+ if (cu_info->bitmap[0][i][j] & mask) {
if (counter == pg_always_on_cu_num)
WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
if (counter < always_on_cu_num)
* SE6,SH0 --> bitmap[2][1]
* SE7,SH0 --> bitmap[3][1]
*/
- cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+ cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
}
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
- u32 bitmap)
+ u32 bitmap, int xcc_id)
{
u32 data;
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}
-static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
u32 data, mask;
- data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, active_cu_number = 0;
+ int i, j, k, counter, xcc_id, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
unsigned disable_masks[4 * 4];
adev->gfx.config.max_sh_per_se);
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- mask = 1;
- ao_bitmap = 0;
- counter = 0;
- gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
- gfx_v9_4_3_set_user_cu_inactive_bitmap(
- adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
- bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
-
- /*
- * The bitmap(and ao_cu_bitmap) in cu_info structure is
- * 4x4 size array, and it's usually suitable for Vega
- * ASICs which has 4*2 SE/SH layout.
- * But for Arcturus, SE/SH layout is changed to 8*1.
- * To mostly reduce the impact, we make it compatible
- * with current bitmap array as below:
- * SE4,SH0 --> bitmap[0][1]
- * SE5,SH0 --> bitmap[1][1]
- * SE6,SH0 --> bitmap[2][1]
- * SE7,SH0 --> bitmap[3][1]
- */
- cu_info->bitmap[i % 4][j + i / 4] = bitmap;
-
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
- if (bitmap & mask) {
- if (counter < adev->gfx.config.max_cu_per_sh)
- ao_bitmap |= mask;
- counter++;
+ for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
+ gfx_v9_4_3_set_user_cu_inactive_bitmap(
+ adev,
+ disable_masks[i * adev->gfx.config.max_sh_per_se + j],
+ xcc_id);
+ bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
+
+ cu_info->bitmap[xcc_id][i][j] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < adev->gfx.config.max_cu_per_sh)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
}
- mask <<= 1;
+ active_cu_number += counter;
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
- active_cu_number += counter;
- if (i < 2 && j < 2)
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
- cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
}
- gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
- 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
adev->nbio.funcs->enable_doorbell_aperture(adev, true);
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
- cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
+ cu->num_simd_cores = cu_info.simd_per_cu *
+ (cu_info.cu_active_number / kdev->kfd->num_nodes);
cu->max_waves_simd = cu_info.max_waves_per_simd;
cu->wave_front_size = cu_info.wave_front_size;
#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
#define CRAT_SUBTYPE_MAX 6
+/*
+ * Do not change the value of CRAT_SIBLINGMAP_SIZE from 32
+ * as it breaks the ABI.
+ */
#define CRAT_SIBLINGMAP_SIZE 32
/*
if (q->wptr_bo) {
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
- queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+ queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
}
queue_input.is_kfd_process = 1;
dqm->dev->kfd2kgd->build_grace_period_packet_info(
dqm->dev->adev, dqm->wait_times,
grace_period, &reg_offset,
- &dqm->wait_times,
- ffs(dqm->dev->xcc_mask) - 1);
+ &dqm->wait_times);
}
dqm_unlock(dqm);
return NULL;
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
+ inx *= 2;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx /= 2;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_bitmap);
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
- uint32_t *se_mask)
+ uint32_t *se_mask, uint32_t inst)
{
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
- int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
+ int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
+ uint32_t cu_active_per_node;
+ int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
+ int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
- if (cu_mask_count > cu_info.cu_active_number)
- cu_mask_count = cu_info.cu_active_number;
+ cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
+ if (cu_mask_count > cu_active_per_node)
+ cu_mask_count = cu_active_per_node;
/* Exceeding these bounds corrupts the stack and indicates a coding error.
* Returning with no CU's enabled will hang the queue, which should be
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
cu_per_sh[se][sh] = hweight32(
- cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
+ cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
+ cu_bitmap_sh_mul]);
/* Symmetrically map cu_mask to all SEs & SHs:
* se_mask programs up to 2 SH in the upper and lower 16 bits.
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
*
+ * For GFX 9.4.3, the following code only looks at a
+ * subset of the cu_mask corresponding to the inst parameter.
+ * If we have n XCCs under one GPU node
+ * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
+ * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
+ * ..
+ * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
+ * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
+ *
+ * For example, if there are 6 XCCs under 1 KFD node, this code
+ * running for each inst, will look at the bits as:
+ * inst, inst + 6, inst + 12...
+ *
* First ensure all CUs are disabled, then enable user specified CUs.
*/
for (i = 0; i < cu_info.num_shader_engines; i++)
se_mask[i] = 0;
- i = 0;
- for (cu = 0; cu < 16; cu += inc) {
+ i = inst;
+ for (cu = 0; cu < 16; cu += cu_inc) {
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
for (se = 0; se < cu_info.num_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (en_mask << (i % 32)))
se_mask[se] |= en_mask << (cu + sh * 16);
i += inc;
- if (i == cu_mask_count)
+ if (i >= cu_mask_count)
return;
}
}
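A small sketch of the bit indexing described in the comment above, to make the interleaving concrete. The helper is hypothetical; with 6 XCCs and cu_inc == 1, instance inst consumes cu_mask bits inst, inst + 6, inst + 12, and so on:

	static void example_dump_bits_for_instance(unsigned int inst,
						   unsigned int num_xcc,
						   unsigned int cu_inc,
						   unsigned int cu_mask_count)
	{
		unsigned int inc = cu_inc * num_xcc;
		unsigned int i;

		/* Matches the i = inst; i += inc walk in the function above */
		for (i = inst; i < cu_mask_count; i += inc)
			pr_debug("inst %u uses cu_mask bit %u\n", inst, i);
	}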
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
- uint32_t *se_mask);
+ uint32_t *se_mask, uint32_t inst);
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
}
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
return 0;
}
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+ struct v11_compute_mqd *m;
+
+ m = get_mqd(mqd);
+
+ memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *qp,
+ const void *mqd_src,
+ const void *ctl_stack_src, const u32 ctl_stack_size)
+{
+ uint64_t addr;
+ struct v11_compute_mqd *m;
+
+ m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
+ addr = mqd_mem_obj->gpu_addr;
+
+ memcpy(m, mqd_src, sizeof(*m));
+
+ *mqd = m;
+ if (gart_addr)
+ *gart_addr = addr;
+
+ m->cp_hqd_pq_doorbell_control =
+ qp->doorbell_off <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+ qp->is_active = 0;
+}
+
+
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
mqd->mqd_stride = kfd_mqd_stride;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct mqd_update_info *minfo)
+ struct mqd_update_info *minfo, uint32_t inst)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
m = get_mqd(mqd);
+
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
- m->compute_static_thread_mgmt_se4 = se_mask[4];
- m->compute_static_thread_mgmt_se5 = se_mask[5];
- m->compute_static_thread_mgmt_se6 = se_mask[6];
- m->compute_static_thread_mgmt_se7 = se_mask[7];
-
- pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
- m->compute_static_thread_mgmt_se0,
- m->compute_static_thread_mgmt_se1,
- m->compute_static_thread_mgmt_se2,
- m->compute_static_thread_mgmt_se3,
- m->compute_static_thread_mgmt_se4,
- m->compute_static_thread_mgmt_se5,
- m->compute_static_thread_mgmt_se6,
- m->compute_static_thread_mgmt_se7);
+ if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
+ m->compute_static_thread_mgmt_se4 = se_mask[4];
+ m->compute_static_thread_mgmt_se5 = se_mask[5];
+ m->compute_static_thread_mgmt_se6 = se_mask[6];
+ m->compute_static_thread_mgmt_se7 = se_mask[7];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3,
+ m->compute_static_thread_mgmt_se4,
+ m->compute_static_thread_mgmt_se5,
+ m->compute_static_thread_mgmt_se6,
+ m->compute_static_thread_mgmt_se7);
+ } else {
+ pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
+ inst, m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+ }
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, minfo);
+ if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
+ update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
+ update_cu_mask(mm, mqd, minfo, xcc);
+
if (q->format == KFD_QUEUE_FORMAT_AQL) {
switch (xcc) {
case 0:
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
pm->dqm->wait_times,
grace_period,
&reg_offset,
- &reg_data,
- 0);
+ &reg_data);
if (grace_period == USE_DEFAULT_GRACE_PERIOD)
reg_data = pm->dqm->wait_times;
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
- dev->gpu ? (dev->node_props.simd_count *
- NUM_XCC(dev->gpu->xcc_mask)) : 0);
+ dev->gpu ? dev->node_props.simd_count : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info,
- int cache_type, unsigned int cu_processor_id)
+ int cache_type, unsigned int cu_processor_id,
+ struct kfd_node *knode)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
- int i, j, k;
+ int i, j, k, xcc, start, end;
struct kfd_cache_properties *pcache = NULL;
- cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
+ start = ffs(knode->xcc_mask) - 1;
+ end = start + NUM_XCC(knode->xcc_mask);
+ cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
k = 0;
- for (i = 0; i < cu_info->num_shader_engines; i++) {
- for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
- pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
- pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
- pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
- pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
- k += 4;
-
- cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
- cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ for (xcc = start; xcc < end; xcc++) {
+ for (i = 0; i < cu_info->num_shader_engines; i++) {
+ for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
+ pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+ pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+ pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+ pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+ k += 4;
+
+ cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
+ cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ }
}
}
pcache->sibling_map_size = k;
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
- int i, j, k;
+ int i, j, k, xcc, start, end;
int ct = 0;
unsigned int cu_processor_id;
int ret;
* then it will consider only one CU from
* the shared unit
*/
+ start = ffs(kdev->xcc_mask) - 1;
+ end = start + NUM_XCC(kdev->xcc_mask);
+
for (ct = 0; ct < num_of_cache_types; ct++) {
cu_processor_id = gpu_processor_id;
if (pcache_info[ct].cache_level == 1) {
- for (i = 0; i < pcu_info->num_shader_engines; i++) {
- for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
- for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
+ for (xcc = start; xcc < end; xcc++) {
+ for (i = 0; i < pcu_info->num_shader_engines; i++) {
+ for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
+ for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
- ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
- pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
+ ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
+ pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
cu_processor_id, k);
- if (ret < 0)
- break;
+ if (ret < 0)
+ break;
- if (!ret) {
- num_of_entries++;
- list_add_tail(&props_ext->list, &dev->cache_props);
- }
+ if (!ret) {
+ num_of_entries++;
+ list_add_tail(&props_ext->list, &dev->cache_props);
+ }
- /* Move to next CU block */
- num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
- pcu_info->num_cu_per_sh) ?
- pcache_info[ct].num_cu_shared :
- (pcu_info->num_cu_per_sh - k);
- cu_processor_id += num_cu_shared;
+ /* Move to next CU block */
+ num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
+ pcu_info->num_cu_per_sh) ?
+ pcache_info[ct].num_cu_shared :
+ (pcu_info->num_cu_per_sh - k);
+ cu_processor_id += num_cu_shared;
+ }
}
}
}
} else {
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
- pcu_info, ct, cu_processor_id);
+ pcu_info, ct, cu_processor_id, kdev);
if (ret < 0)
break;
struct attribute attr;
};
-#define CACHE_SIBLINGMAP_SIZE 64
+#define CACHE_SIBLINGMAP_SIZE 128
struct kfd_cache_properties {
struct list_head list;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
- page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
- page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
- page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
- page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
page_table_base.low_part = lower_32_bits(pt_base);
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
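Both forms program the same page frame number; as a quick check (assuming AMDGPU_GPU_PAGE_SHIFT is 12 and GART addresses of at most 48 bits), addr >> 12 fits in 36 bits, so upper_32_bits(addr >> 12) equals the old (addr >> 44) & 0xF and lower_32_bits(addr >> 12) equals the old (u32)(addr >> 12). Expressing it through AMDGPU_GPU_PAGE_SHIFT simply removes the hard-coded 44 and 0xF.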
}
break;
}
- if (init_data.flags.gpu_vm_support)
- init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
+
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
- int ret;
- bool need_hotplug = false;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (!dp_is_lttpr_present(aconnector->dc_link))
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
- ret = drm_dp_mst_topology_mgr_resume(mgr, true);
- if (ret < 0) {
- dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
- aconnector->dc_link);
- need_hotplug = true;
- }
+ /* TODO: move resume_mst_branch_status() into drm mst resume again
+ * once topology probing work is pulled out from mst resume into mst
+ * resume 2nd step. mst resume 2nd step should be called after old
+ * state getting restored (i.e. drm_atomic_helper_resume()).
+ */
+ resume_mst_branch_status(mgr);
}
}
drm_connector_list_iter_end(&iter);
-
- if (need_hotplug)
- drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
- int i, r, j;
+ int i, r, j, ret;
+ bool need_hotplug = false;
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
continue;
/*
- * this is the case when traversing through already created
+ * this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
if (aconnector && aconnector->mst_root)
dm->cached_state = NULL;
+ /* Do mst topology probing after resuming cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else if (!old_stream)
- drm_mode_set_crtcinfo(&mode, 0);
/*
* If scaling is enabled and refresh rate didn't change
goto fail;
}
+ drm_mode_set_crtcinfo(mode, 0);
+
stream = create_validate_stream_for_sink(aconnector, mode,
to_dm_connector_state(connector->state),
NULL);
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
unsigned int max_refresh_rate_hz;
/**
- * @replay mode: Replay supported
+ * @replay_mode: Replay supported
*/
bool replay_mode;
};
/* Return first available DIG link encoder. */
static enum engine_id find_first_avail_link_enc(
const struct dc_context *ctx,
- const struct dc_state *state)
+ const struct dc_state *state,
+ enum engine_id eng_id_requested)
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
int i;
+ if (eng_id_requested != ENGINE_ID_UNKNOWN) {
+
+ for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
+ if (eng_id == eng_id_requested)
+ return eng_id;
+ }
+ }
+
+ eng_id = ENGINE_ID_UNKNOWN;
+
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id != ENGINE_ID_UNKNOWN)
struct dc_stream_state *streams[],
uint8_t stream_count)
{
- enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+ enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
int i;
int j;
* assigned to that endpoint.
*/
link_enc = get_link_enc_used_by_link(state, stream->link);
- if (link_enc == NULL)
- eng_id = find_first_avail_link_enc(stream->ctx, state);
+ if (link_enc == NULL) {
+
+ if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
+ eng_id_req = stream->link->dpia_preferred_eng_id;
+
+ eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
+ }
else
eng_id = link_enc->preferred_engine;
DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
- assignment.ep_id.link_id.enum_id - 1,
+ assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+ assignment.ep_id.link_id.enum_id :
+ assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
for (i = 0; i < MAX_PIPES; i++) {
DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
- assignment.ep_id.link_id.enum_id - 1,
+ assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+ assignment.ep_id.link_id.enum_id :
+ assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
if (stream)
link = stream->link;
- // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
return link;
}
* object creation.
*/
enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
enum dp_test_pattern current_test_pattern;
return;
}
- if (link->panel_cntl) {
+ if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
dto_params.otg_inst = tg->inst;
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se)
+ if (dccg) {
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+ } else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+ }
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
- dccg->funcs->update_clocks(
- dccg,
- context,
- false);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ false);
}
void dce110_optimize_bandwidth(
dce110_set_displaymarks(dc, context);
- dccg->funcs->update_clocks(
- dccg,
- context,
- true);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ true);
}
static void dce110_program_front_end_for_pipe(
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
- struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se)
- dccg->funcs->enable_symclk_se(dccg,
- stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
-
+ } else {
+ }
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
+/* ========================================================== */
+
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ * 0 | C | 0
+ * 1 | First Available | 0
+ * 2 | D | 1
+ * 3 | First Available | 1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
+static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
};
static struct clock_source *dcn30_clock_source_create(
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
- } else {
+ } else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
struct clk_bw_params;
struct resource_funcs {
+ enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
struct panel_cntl*(*panel_cntl_create)(
/* Set dpia port index : 0 to number of dpia ports */
link->ddc_hw_inst = init_params->connector_index;
+ // Assign Dpia preferred eng_id
+ if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
+ link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
+
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
+#include "amdgpu_irq.h"
+#include "amdgpu_gfx.h"
struct pci_dev;
struct amdgpu_device;
-#define KGD_MAX_QUEUES 128
-
struct kfd_dev;
struct kgd_mem;
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
uint32_t lds_size;
- uint32_t cu_bitmap[4][4];
+ uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
/* For getting GPU local memory information from KGD */
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
- while (retry--) {
+ while (--retry) {
ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
if (ret)
return ret;
/**
* drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
* @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* HDMI connectors.
/**
* drm_mode_create_dp_colorspace_property - create dp colorspace property
* @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* DP connectors.
struct drm_gem_object *obj;
unsigned long index;
- drm_exec_for_each_locked_object(exec, index, obj) {
+ drm_exec_for_each_locked_object_reverse(exec, index, obj) {
dma_resv_unlock(obj->resv);
drm_gem_object_put(obj);
}
return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915;
+ u8 aux_channel;
+ int count = 0;
+
+ if (!devdata || !devdata->child.aux_channel)
+ return false;
+
+ i915 = devdata->i915;
+ aux_channel = devdata->child.aux_channel;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ if (intel_bios_encoder_supports_dp(devdata) &&
+ aux_channel == devdata->child.aux_channel)
+ count++;
+ }
+
+ return count > 1;
+}
+
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
/*
* VBT and straps are liars. Also check HPD as that seems
* to be the most reliable piece of information available.
+ *
+ * ... except on devices that forgot to hook HPD up for eDP
+ * (eg. Acer Chromebook C710), so we'll check it only if multiple
+ * ports are attempting to use the same AUX CH, according to VBT.
*/
- if (!intel_digital_port_connected(encoder)) {
+ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
+ !intel_digital_port_connected(encoder)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.
st->nents = 0;
for (i = 0; i < page_count; i++) {
struct folio *folio;
+ unsigned long nr_pages;
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
0,
}
} while (1);
+ nr_pages = min_t(unsigned long,
+ folio_nr_pages(folio), page_count - i);
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
sg = sg_next(sg);
st->nents++;
- sg_set_folio(sg, folio, folio_size(folio), 0);
+ sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
/* XXX: could overflow? */
- sg->length += folio_size(folio);
+ sg->length += nr_pages * PAGE_SIZE;
}
- next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
- i += folio_nr_pages(folio) - 1;
+ next_pfn = folio_pfn(folio) + nr_pages;
+ i += nr_pages - 1;
/* Check that the i965g/gm workaround works. */
GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
- seqcount_init(&engine->stats.execlists.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+ seqcount_init(&engine->stats.execlists.lock);
+
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
rcs_submission_override(engine);
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+/*
+ * Reserve the top of the GuC address space for firmware images. Addresses
+ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
+ * which makes for a suitable range to hold GuC/HuC firmware images if the
+ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
+ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
+ * of the same size anyway, which is far more than needed, to keep the logic
+ * in uc_fw_ggtt_offset() simple.
+ */
+#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
+
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
- u64 size;
+ u64 offset;
int ret;
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
+ GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
+ offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
- ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
+ GUC_TOP_RESERVE_SIZE, offset,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
if (ret)
drm_dbg(&ggtt->vm.i915->drm,
"Failed to reserve top of GGTT for GuC\n");
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
/*
* Wa_22016122933: For Media version 13.0, all Media GT shared
* memory needs to be mapped as WC on CPU side and UC (PAT
if (intel_gt_needs_wa_22016122933(engine->gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
- if (IS_ERR(obj))
- return ERR_CAST(obj);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
int srcu, ret;
/*
+ * Ideally the busyness worker should take a gt pm wakeref because the
+ * worker only needs to be active while gt is awake. However, the
+ * gt_park path cancels the worker synchronously and this complicates
+ * the flow if the worker is also running at the same time. The cancel
+ * waits for the worker and when the worker releases the wakeref, that
+ * would call gt_park and would lead to a deadlock.
+ *
+ * The resolution is to take the global pm wakeref if runtime pm is
+ * already active. If not, we don't need to update the busyness stats as
+ * the stats would already be updated when the gt was parked.
+ *
+ * Note:
+ * - We do not requeue the worker if we cannot take a reference to runtime
+ * pm since intel_guc_busyness_unpark would requeue the worker in the
+ * resume path.
+ *
+ * - If the gt was parked longer than time taken for GT timestamp to roll
+ * over, we ignore those rollovers since we don't care about tracking
+ * the exact GT time. We only care about roll overs when the gt is
+ * active and running workloads.
+ *
+ * - There is a window of time between gt_park and runtime suspend,
+ * where the worker may run. This is acceptable since the worker will
+ * not find any new data to update busyness.
+ */
+ wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
+ if (!wakeref)
+ return;
+
+ /*
* Synchronize with gt reset to make sure the worker does not
* corrupt the engine/guc stats. NB: can't actually block waiting
* for a reset to complete as the reset requires flushing out
*/
ret = intel_gt_reset_trylock(gt, &srcu);
if (ret)
- return;
+ goto err_trylock;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
- __update_guc_busyness_stats(guc);
+ __update_guc_busyness_stats(guc);
/* adjust context stats for overflow */
xa_for_each(&guc->context_lookup, index, ce)
intel_gt_reset_unlock(gt, srcu);
guc_enable_busyness_worker(guc);
+
+err_trylock:
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
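A condensed sketch of the conditional-wakeref flow the comment above describes (grab runtime PM only if it is already active, otherwise skip the update entirely and rely on the park path having updated the stats). The helpers here are illustrative stand-ins, not the i915 API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for intel_runtime_pm_get_if_active()/_put(). */
static bool pm_active = true;
static int  pm_get_if_active(void) { return pm_active ? 1 : 0; }
static void pm_put(int wakeref)    { (void)wakeref; }

static void busyness_worker(void)
{
	int wakeref = pm_get_if_active();

	if (!wakeref)
		return;         /* device parked: stats were updated at park time */

	/* ... update busyness stats while the device is guaranteed awake ... */
	printf("stats updated\n");

	pm_put(wakeref);
}

int main(void)
{
	busyness_worker();
	return 0;
}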
static int guc_action_enable_usage_stats(struct intel_guc *guc)
return;
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
+
+ kfree(edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
static inline void *
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
+ void __user *userptr = u64_to_user_ptr(user);
+ size_t bytes;
- size *= nmemb;
-
- mem = kvmalloc(size, GFP_KERNEL);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
- }
-
- return mem;
+ if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+ return ERR_PTR(-EOVERFLOW);
+ return vmemdup_user(userptr, bytes);
}
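A minimal, kernel-independent sketch of the overflow-checked sizing used above: __builtin_mul_overflow is the compiler primitive that check_mul_overflow() wraps, and dup_array() is only a hypothetical userspace analogue of vmemdup_user() (it returns NULL rather than an ERR_PTR):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Returns NULL on multiplication overflow or allocation failure; caller frees. */
static void *dup_array(const void *src, size_t nmemb, size_t size)
{
	size_t bytes;
	void *mem;

	if (__builtin_mul_overflow(nmemb, size, &bytes))
		return NULL;            /* nmemb * size would wrap */

	mem = malloc(bytes);
	if (!mem)
		return NULL;

	memcpy(mem, src, bytes);
	return mem;
}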
#include <nvif/object.h>
nouveau_sched_entity_fini(job->entity);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static struct nouveau_job_ops nouveau_exec_job_ops = {
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
- struct nouveau_channel *chan = fence->channel;
+ struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
- NV_PRINTK(warn, job->cli, "Job timed out.\n");
+ drm_sched_stop(sched, sched_job);
if (job->ops->timeout)
- return job->ops->timeout(job);
+ stat = job->ops->timeout(job);
+ else
+ NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
+
+ drm_sched_start(sched, true);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return stat;
}
static void
unsigned int size, unsigned int align)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
- GFP_KERNEL, true, align);
+ GFP_KERNEL, false, align);
if (IS_ERR(sa)) {
*sa_bo = NULL;
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 2, max / 2));
+ max / 2, max));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
max / 4 + 1, 3 * max / 4 - 1));
#define READ_STATUS_SIZE 13
#define MISC_VALUE_SIZE 4
-#define CMD_TIMEOUT msecs_to_jiffies(200)
-#define DATA_TIMEOUT msecs_to_jiffies(1000)
-#define IDLE_TIMEOUT msecs_to_jiffies(2000)
-#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000)
+#define CMD_TIMEOUT 200
+#define DATA_TIMEOUT 1000
+#define IDLE_TIMEOUT 2000
+#define FIRST_FRAME_TIMEOUT 2000
#define MISC_REQ_GET_SET_ECO_A 0xff
#define MISC_REQ_GET_SET_ECO_B 0x35
* switches back to showing its logo.
*/
queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
- IDLE_TIMEOUT);
+ msecs_to_jiffies(IDLE_TIMEOUT));
return;
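A tiny illustration of why the timeouts above are now kept in milliseconds and only converted with msecs_to_jiffies() at the point of use: jiffies depend on the kernel's HZ, so the same 2000 ms maps to different tick counts. The sketch below mimics the conversion in userspace for two assumed HZ values:

#include <stdio.h>

#define IDLE_TIMEOUT_MS 2000

/* Userspace stand-in for msecs_to_jiffies() with an assumed HZ (rounds up). */
static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	printf("HZ=100:  %lu jiffies\n", ms_to_jiffies(IDLE_TIMEOUT_MS, 100));
	printf("HZ=1000: %lu jiffies\n", ms_to_jiffies(IDLE_TIMEOUT_MS, 1000));
	return 0;
}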
err:
submit->buf = NULL;
submit->buflist = NULL;
submit->sync_file = NULL;
- submit->out_fence = NULL;
submit->out_fence_fd = -1;
}
if (enabled)
drm_crtc_vblank_get(&out->crtc);
- mutex_lock(&out->enabled_lock);
+ spin_lock_irq(&out->lock);
old_enabled = out->composer_enabled;
out->composer_enabled = enabled;
-
- /* the composition wasn't enabled, so unlock the lock to make sure the lock
- * will be balanced even if we have a failed commit
- */
- if (!out->composer_enabled)
- mutex_unlock(&out->enabled_lock);
+ spin_unlock_irq(&out->lock);
if (old_enabled)
drm_crtc_vblank_put(&out->crtc);
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state;
u64 ret_overrun;
- bool ret, fence_cookie, composer_enabled;
+ bool ret, fence_cookie;
fence_cookie = dma_fence_begin_signalling();
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
- composer_enabled = output->composer_enabled;
- mutex_unlock(&output->enabled_lock);
+ spin_unlock(&output->lock);
- if (state && composer_enabled) {
+ if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
/* update frame_start only if a queued vkms_composer_worker()
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
- mutex_init(&vkms_out->enabled_lock);
vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
if (!vkms_out->composer_workq)
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
spinlock_t lock;
- /* guarantees that if the composer is enabled, a job will be queued */
- struct mutex enabled_lock;
- /* protected by @enabled_lock */
+ /* protected by @lock */
bool composer_enabled;
struct vkms_crtc_state *composer_state;
struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int in = index / 5; /* voltage index */
+ int nr = index % 5; /* attribute index */
+
+ if (nr == 1 && data->ALARM_BITS[in] == -1)
+ return 0;
if (!(data->have_in & BIT(in)))
return 0;
source "drivers/i2c/muxes/Kconfig"
config I2C_ATR
- tristate "I2C Address Translator (ATR) support"
+ tristate "I2C Address Translator (ATR) support" if COMPILE_TEST
help
Enable support for I2C Address Translator (ATR) chips.
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
- depends on X86_64 || ARM64 || COMPILE_TEST
+ depends on X86_64 || (ARM64 && ACPI) || COMPILE_TEST
help
This exposes the Mellanox platform I2C busses to the linux I2C layer
- for X86 based systems.
+ for X86 and ARM64/ACPI based systems.
Controller is implemented as CPLD logic.
This driver can also be built as a module. If so, the module will be
if (time_left == 0) {
/*
- * If timed out and bus is still busy in a multi master
- * environment, attempt recovery at here.
+ * In a multi-master setup, if a timeout occurs, attempt
+ * recovery. But if the bus is idle, we still need to reset the
+ * i2c controller to clear the remaining interrupts.
*/
if (bus->multi_master &&
(readl(bus->base + ASPEED_I2C_CMD_REG) &
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ else
+ aspeed_i2c_reset(bus);
/*
* If timed out and the state is still pending, drop the pending
* @reset: Reset control for the device
* @quirks: flag for broken hold bit usage in r1p10
* @ctrl_reg: Cached value of the control register.
+ * @rinfo: I2C GPIO recovery information
* @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register
* @slave: Registered slave instance.
* @dev_mode: I2C operating role(master/slave).
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
+ unsigned int raw_intr_stats;
+ unsigned int enable;
int timeout = 100;
+ bool abort_needed;
unsigned int status;
+ int ret;
+
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
+ regmap_read(dev->map, DW_IC_ENABLE, &enable);
+
+ abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ if (abort_needed) {
+ regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
+ !(enable & DW_IC_ENABLE_ABORT), 10,
+ 100);
+ if (ret)
+ dev_err(dev->dev, "timeout while trying to abort current transfer\n");
+ }
do {
__i2c_dw_disable_nowait(dev);
#define DW_IC_INTR_START_DET BIT(10)
#define DW_IC_INTR_GEN_CALL BIT(11)
#define DW_IC_INTR_RESTART_DET BIT(12)
+#define DW_IC_INTR_MST_ON_HOLD BIT(13)
#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_RX_UNDER | \
DW_IC_INTR_RD_REQ)
+#define DW_IC_ENABLE_ABORT BIT(1)
+
#define DW_IC_STATUS_ACTIVITY BIT(0)
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_RFNE BIT(3)
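A rough, self-contained sketch of the abort handshake added above: only issue ABORT when a master transfer is stuck on hold, then poll ENABLE until the self-clearing bit drops. The bit names mirror the defines above; the fake register file and its "hardware" behaviour are purely illustrative so the example runs stand-alone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DW_IC_ENABLE_ABORT      (1u << 1)
#define DW_IC_INTR_MST_ON_HOLD  (1u << 13)

/* Fake registers standing in for the controller's RAW_INTR_STAT / ENABLE. */
static uint32_t raw_intr_stat = DW_IC_INTR_MST_ON_HOLD;
static uint32_t enable_reg;
static int      hw_busy = 3;        /* "hardware" clears ABORT after 3 polls */

static uint32_t read_enable(void)
{
	if (hw_busy && --hw_busy == 0)
		enable_reg &= ~DW_IC_ENABLE_ABORT;   /* controller finished the abort */
	return enable_reg;
}

/* Mirror of the flow added above. */
static bool abort_stuck_transfer(void)
{
	if (!(raw_intr_stat & DW_IC_INTR_MST_ON_HOLD))
		return true;                          /* nothing to abort */

	enable_reg |= DW_IC_ENABLE_ABORT;
	for (int tries = 0; tries < 10; tries++)      /* ~10 polls, 10us apart in the driver */
		if (!(read_enable() & DW_IC_ENABLE_ABORT))
			return true;
	return false;                                 /* abort timed out */
}

int main(void)
{
	printf("abort %s\n", abort_stuck_transfer() ? "completed" : "timed out");
	return 0;
}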
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
+ platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}
{
struct i2c_msg *msgs;
int msgs_num;
+ bool do_complete = false;
msgs = bus->msgs;
msgs_num = bus->msgs_num;
msgs[1].flags & I2C_M_RD)
msgs[1].len = info;
}
- if (completion_done(&bus->cmd_complete) == false)
- complete(&bus->cmd_complete);
- break;
-
+ do_complete = true;
+ break;
case I2C_NACK_IND:
/* MASTER transmit got a NACK before tx all bytes */
bus->cmd_err = -ENXIO;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_BUS_ERR_IND:
/* Bus error */
bus->cmd_err = -EAGAIN;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_WAKE_UP_IND:
/* I2C wake up */
if (bus->slave)
bus->master_or_slave = I2C_SLAVE;
#endif
+ if (do_complete)
+ complete(&bus->cmd_complete);
}
static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
* reset the IP instead of just flush fifos
*/
ret = xiic_reinit(i2c);
- if (!ret)
+ if (ret < 0)
dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
if (i2c->rx_msg) {
priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
- if (i2c_mux_parent_classes(parent) & class)
+ if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
dev_err(&parent->dev,
"Segment %d behind mux can't share classes with ancestors\n",
chan_id);
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ if (!props[i].name || !props[i].value) {
+ err = -ENOMEM;
+ goto err_rollback;
+ }
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);
} else if (is_acpi_node(child)) {
rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
- if (rc)
+ if (rc) {
+ fwnode_handle_put(child);
return dev_err_probe(dev, rc, "Cannot get address\n");
+ }
}
i++;
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
#include <linux/of.h>
#include <asm/mxregs.h>
/* a list of devices used by this table */
struct list_head devices;
+ struct rw_semaphore devices_lock;
/* events get handed up using this callback */
void (*event_fn)(void *data);
struct dm_dev_internal *dd;
struct dm_target_deps *deps;
+ down_read(&table->devices_lock);
+
deps = get_result_buffer(param, param_size, &len);
/*
needed = struct_size(deps, dev, count);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
- return;
+ goto out;
}
/*
deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
param->data_size = param->data_start + needed;
+
+out:
+ up_read(&table->devices_lock);
}
static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
+ init_rwsem(&t->devices_lock);
if (!num_targets)
num_targets = KEYS_PER_NODE;
if (dev == disk_devt(t->md->disk))
return -EINVAL;
+ down_write(&t->devices_lock);
+
dd = find_device(&t->devices, dev);
if (!dd) {
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
- if (!dd)
- return -ENOMEM;
+ if (!dd) {
+ r = -ENOMEM;
+ goto unlock_ret_r;
+ }
r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
if (r) {
kfree(dd);
- return r;
+ goto unlock_ret_r;
}
refcount_set(&dd->count, 1);
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
- return r;
+ goto unlock_ret_r;
}
refcount_inc(&dd->count);
out:
+ up_write(&t->devices_lock);
*result = dd->dm_dev;
return 0;
+
+unlock_ret_r:
+ up_write(&t->devices_lock);
+ return r;
}
EXPORT_SYMBOL(dm_get_device);
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
int found = 0;
- struct list_head *devices = &ti->table->devices;
+ struct dm_table *t = ti->table;
+ struct list_head *devices = &t->devices;
struct dm_dev_internal *dd;
+ down_write(&t->devices_lock);
+
list_for_each_entry(dd, devices, list) {
if (dd->dm_dev == d) {
found = 1;
}
if (!found) {
DMERR("%s: device %s not in table devices list",
- dm_device_name(ti->table->md), d->name);
- return;
+ dm_device_name(t->md), d->name);
+ goto unlock_ret;
}
if (refcount_dec_and_test(&dd->count)) {
- dm_put_table_device(ti->table->md, d);
+ dm_put_table_device(t->md, d);
list_del(&dd->list);
kfree(dd);
}
+
+unlock_ret:
+ up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);
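A minimal userland sketch of the reader/writer split the new devices_lock introduces above: listing the devices (as in list_devices()/table_deps()) takes the lock shared, while adding or removing a device (dm_get_device()/dm_put_device()) takes it exclusive. pthread_rwlock_t stands in for the kernel rw_semaphore and the "list" is just a counter:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;
static int num_devices;

/* Reader: only walks the list, may run concurrently with other readers. */
static int count_devices(void)
{
	int n;

	pthread_rwlock_rdlock(&devices_lock);
	n = num_devices;
	pthread_rwlock_unlock(&devices_lock);
	return n;
}

/* Writer: mutates the list, excludes readers and other writers. */
static void add_device(void)
{
	pthread_rwlock_wrlock(&devices_lock);
	num_devices++;
	pthread_rwlock_unlock(&devices_lock);
}

int main(void)
{
	add_device();
	printf("%d device(s)\n", count_devices());
	return 0;
}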
rcu_read_unlock();
}
-static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- return dm_get_live_table_fast(md);
- else
- return dm_get_live_table(md, srcu_idx);
-}
-
-static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- dm_put_live_table_fast(md);
- else
- dm_put_live_table(md, srcu_idx);
-}
-
static char *_dm_claim_ptr = "I belong to device-mapper";
/*
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- blk_opf_t bio_opf = bio->bi_opf;
- map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
+ map = dm_get_live_table(md, &srcu_idx);
/* If suspended, or map not yet available, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
dm_split_and_process_bio(md, map, bio);
out:
- dm_put_live_table_bio(md, srcu_idx, bio_opf);
+ dm_put_live_table(md, srcu_idx);
}
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
} else
mutex_unlock(&mddev->reconfig_mutex);
+ md_wakeup_thread(mddev->thread);
+ wake_up(&mddev->sb_wait);
+
list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
list_del_init(&rdev->same_set);
kobject_del(&rdev->kobj);
export_rdev(rdev, mddev);
}
-
- md_wakeup_thread(mddev->thread);
- wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
+ blkdev_put(rdev->bdev,
+ test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
struct md_rdev *rdev;
+ struct md_rdev *holder;
sector_t size;
int err;
if (err)
goto out_clear_rdev;
+ if (super_format == -2) {
+ holder = &claim_rdev;
+ } else {
+ holder = rdev;
+ set_bit(Holder, &rdev->flags);
+ }
+
rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- super_format == -2 ? &claim_rdev : rdev, NULL);
+ holder, NULL);
if (IS_ERR(rdev->bdev)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
return rdev;
out_blkdev_put:
- blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
+ blkdev_put(rdev->bdev, holder);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
spin_unlock(&all_mddevs_lock);
if (to_put)
- mddev_put(mddev);
+ mddev_put(to_put);
return next_mddev;
}
* check if there is collision between raid1
* serial bios.
*/
+ Holder, /* rdev is used as holder while opening
+ * underlying disk exclusively.
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
struct r1conf *conf = mddev->private;
int err = 0;
int number = rdev->raid_disk;
+ struct raid1_info *p = conf->mirrors + number;
if (unlikely(number >= conf->raid_disks))
goto abort;
- struct raid1_info *p = conf->mirrors + number;
-
if (rdev != p->rdev)
p = conf->mirrors + conf->raid_disks + number;
* different type underlying the specified range of virtual addresses.
* When the function isn't able to map a single page, it returns error.
*
+ * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used
+ * to be able to do that, but that could (racily) return non-refcounted
+ * pfns.
+ *
* This function takes care of grabbing mmap_lock as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
if (likely(ret > 0))
return ret;
- /* This used to (racily) return non-refcounted pfns. Let people know */
- WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
vec->nr_frames = 0;
return ret ? ret : -EFAULT;
}
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- /* Initialize try_fmt */
+ /* Initialize the format. */
format = v4l2_subdev_get_pad_format(sd, state, 0);
imx219_update_pad_format(imx219, &supported_modes[0], format,
MEDIA_BUS_FMT_SRGGB10_1X10);
- /* Initialize crop rectangle. */
+ /* Initialize the crop rectangle. */
crop = v4l2_subdev_get_pad_crop(sd, state, 0);
crop->top = IMX219_PIXEL_ARRAY_TOP;
crop->left = IMX219_PIXEL_ARRAY_LEFT;
const struct imx219_mode *mode;
int exposure_max, exposure_def, hblank;
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
+
format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
+ crop = v4l2_subdev_get_pad_crop(sd, sd_state, 0);
- if (imx219->mode == mode && format->code == fmt->format.code)
- return 0;
+ *format = fmt->format;
+ *crop = mode->crop;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
imx219->mode = mode;
hblank);
}
- *format = fmt->format;
-
return 0;
}
static void max9286_v4l2_unregister(struct max9286_priv *priv)
{
- fwnode_handle_put(priv->sd.fwnode);
v4l2_ctrl_handler_free(&priv->ctrls);
v4l2_async_unregister_subdev(&priv->sd);
max9286_v4l2_notifier_unregister(priv);
v4l2_async_unregister_subdev(&dev->sd);
v4l2_ctrl_handler_free(&dev->ctrls);
i2c_unregister_device(dev->isp);
- fwnode_handle_put(dev->sd.fwnode);
}
static const struct of_device_id rdacm21_of_ids[] = {
sg = sglist;
for (line = 0; line < store_lines; line++) {
if ((line >= (store_lines - VCR_HACK_LINES)) &&
- (btv->opt_vcr_hack ||
- (V4L2_FIELD_HAS_BOTH(btv->field) ||
- btv->field == V4L2_FIELD_ALTERNATE)))
+ btv->opt_vcr_hack)
continue;
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
config INTEL_VSC
tristate "Intel Visual Sensing Controller"
- depends on INTEL_MEI && ACPI
+ depends on INTEL_MEI && ACPI && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
help
This adds support for Intel Visual Sensing Controller (IVSC).
depends on V4L_PLATFORM_DRIVERS
depends on PCI && I2C && VIDEO_DEV
depends on COMMON_CLK
- select VIDEO_OV7670
+ select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
depends on I2C && VIDEO_DEV
depends on ARCH_MMP || COMPILE_TEST
depends on COMMON_CLK
- select VIDEO_OV7670
+ select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select I2C_GPIO
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
v4l2_async_unregister_subdev(&csis->sd);
err_disable_clock:
mipi_csis_clk_disable(csis);
- fwnode_handle_put(csis->sd.fwnode);
return ret;
}
mipi_csis_clk_disable(csis);
v4l2_subdev_cleanup(&csis->sd);
media_entity_cleanup(&csis->sd.entity);
- fwnode_handle_put(csis->sd.fwnode);
pm_runtime_set_suspended(&pdev->dev);
}
depends on V4L_PLATFORM_DRIVERS
depends on FB_VIA && VIDEO_DEV
select VIDEOBUF2_DMA_SG
- select VIDEO_OV7670
+ select VIDEO_OV7670 if VIDEO_CAMERA_SENSOR
help
Driver support for the integrated camera controller in VIA
Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
- select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
help
This is a video4linux driver for Empia 28xx based TV cards.
select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
help
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip.
query_menu->id = id;
query_menu->index = index;
+ if (index >= BITS_PER_TYPE(mapping->menu_mask))
+ return -EINVAL;
+
ret = mutex_lock_interruptible(&chain->ctrl_mutex);
if (ret < 0)
return -ERESTARTSYS;
}
EXPORT_NS_GPL_DEV_PM_OPS(cs42l43_pm_ops, MFD_CS42L43) = {
- SET_SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
- SET_RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
+ RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
};
MODULE_DESCRIPTION("CS42L43 Core Driver");
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
if (CHK_PCI_PID(pcr, 0x522A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
}
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
}
}
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+ rtsx_pci_enable_oobs_polling(pcr);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
- if (option->ltr_en) {
- u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_en) {
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
-{
- struct rtsx_cr_option *option = &(pcr->option);
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
- return 0;
-}
-
static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
struct rtsx_cr_option *option = &(pcr->option);
rts5249_init_from_cfg(pcr);
- rts5249_init_from_hw(pcr);
rtsx_pci_init_cmd(pcr);
}
}
+
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.
*/
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &pcr->option;
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
rts5260_init_hw(pcr);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
return 0;
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
u32 val;
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
return err;
}
- if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_mode == ASPM_MODE_REG)
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
- }
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
- int err;
+ struct rtsx_cr_option *option = &(pcr->option);
+ int err, l1ss;
+ u32 lval;
u16 cfg_val;
u8 val;
pcr->aspm_enabled = true;
}
+ l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
+ if (l1ss) {
+ pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
+ if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ } else {
+ option->ltr_enabled = false;
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+ } else {
+ option->ltr_enabled = false;
+ option->force_clkreq_0 = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
int rc;
- /* We don't _need_ to read the full entry, just the command area which
- * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
- * buffer that contains the full entry too. Additionally, our API
- * doesn't really know how many bytes into the buffer does the command
- * area really begin. So just read back the whole entry.
- */
+ /* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
- memset(cmd, 0, sizeof(*cmd));
- ops->cmd_packing(packed_buf, cmd, UNPACK);
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
- return cmd->valid ? -EAGAIN : 0;
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
- int rc;
-
- return read_poll_timeout(sja1105_dynamic_config_poll_valid,
- rc, rc != -EAGAIN,
- SJA1105_DYNAMIC_CONFIG_SLEEP_US,
- SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
- false, priv, cmd, ops);
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
}
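A minimal stand-in for the read_poll_timeout() pattern used above, showing why the wrapper keeps the poll's own timeout (err) separate from the result of the polled operation (rc) and then propagates "err < 0 ? err : rc". All names here are illustrative, not the sja1105 or kernel API:

#include <errno.h>
#include <stdio.h>

/* Polled operation: returns -EAGAIN while the hardware is still busy,
 * then the final status of the completed command (here -ENOENT). */
static int poll_once(int *budget)
{
	return (--(*budget) > 0) ? -EAGAIN : -ENOENT;
}

/* Simplified read_poll_timeout(): the return value says whether polling
 * itself timed out, *rc carries the last result of the polled operation. */
static int wait_complete(int *rc)
{
	int budget = 5;

	for (int tries = 0; tries < 10; tries++) {
		*rc = poll_once(&budget);
		if (*rc != -EAGAIN)
			return 0;            /* operation finished */
	}
	return -ETIMEDOUT;                   /* the poll gave up first */
}

int main(void)
{
	int rc, err = wait_complete(&rc);

	/* Same propagation as the driver: err < 0 ? err : rc */
	printf("result: %d\n", err < 0 ? err : rc);
	return 0;
}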
/* Provides read access to the settings through the dynamic interface
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- /* Don't dereference possibly NULL pointer - maybe caller
- * only wanted to see whether the entry existed or not.
- */
- if (entry)
- ops->entry_packing(packed_buf, entry, UNPACK);
- return 0;
+ return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
- if (cmd.errors)
- return -EINVAL;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- return 0;
+ return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ int rc;
if (!vid) {
switch (db.type) {
}
}
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
if (!(l2_lookup.destports & BIT(port)))
continue;
- /* We need to hide the FDB entry for unknown multicast */
- if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
- l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
- continue;
-
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
};
int i;
+ mutex_lock(&priv->fdb_lock);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
- return;
+ break;
}
if (!(l2_lookup.destports & BIT(port)))
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
- return;
+ break;
}
}
+
+ mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
int rc, i;
s64 now;
+ mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
return rc;
}
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
- int match;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
- return -ENOSPC;
+ rc = -ENOSPC;
+ goto out;
}
if (flags.val & BR_MCAST_FLOOD)
else
l2_lookup[match].destports &= ~BIT(to);
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup[match].index,
- &l2_lookup[match],
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
return -ENOMEM;
other_port = priv->ports[!port_priv->nr];
- port_rules = adin1110_port_rules(port_priv, false, true);
+ port_rules = adin1110_port_rules(other_port, false, true);
eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
return work_done;
error:
+ if (xdp_flags & ENA_XDP_REDIRECT)
+ xdp_do_flush();
+
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
ASP_RX_FILTER_BLK_CTRL);
}
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt)
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
{
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
}
*rule_cnt = j;
+
+ return 0;
}
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
+ of_node_put(intf_node);
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt);
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
err = bcmasp_flow_get(intf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
cmd->data = NUM_NET_FILTERS;
break;
default:
struct rx_cmp_ext *rxcmp1;
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 rx_pkts = 0;
u8 event = 0;
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
} else if (unlikely(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE)) {
bnxt_hwrm_handler(bp, txcmp);
if (event & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ if (flush_xdp)
+ xdp_do_flush();
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
if (gem_has_ptp(bp))
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- ch->max_rx = adapter->num_rx_queues;
- ch->max_tx = adapter->num_tx_queues;
- ch->rx_count = adapter->num_rx_queues;
- ch->tx_count = adapter->num_tx_queues;
+ ch->max_combined = adapter->num_queues;
+ ch->combined_count = adapter->num_queues;
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
+ if (napi_schedule_prep(&adapter->queue[0].napi)) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&adapter->queue[0].napi);
+ }
}
return IRQ_HANDLED;
struct tsnep_queue *queue = arg;
/* handle TX/RX queue interrupt */
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
- napi_schedule(&queue->napi);
+ if (napi_schedule_prep(&queue->napi)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&queue->napi);
+ }
return IRQ_HANDLED;
}
if (queue->tx)
complete = tsnep_tx_poll(queue->tx, budget);
+ /* handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(budget <= 0))
+ return budget;
+
if (queue->rx) {
done = queue->rx->xsk_pool ?
tsnep_rx_poll_zc(queue->rx, napi, budget) :
NETIF_F_HW_TC);
netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
+
+ /* The device_version V3 hardware can't offload the checksum for IP in
+	 * GRE packets, but can do it for NvGRE. So disable the checksum and
+	 * GSO offload for GRE by default.
+ */
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
+ netdev->features &= ~NETIF_F_GSO_GRE;
+ netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
+#define HCLGE_IMP_RESET_DELAY 5
+
switch (event_type) {
case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
NULL, false);
if (ret) {
+		/* If the TCAM config fails, set the rule state to TO_DEL,
+		 * so the rule will be deleted when the periodic
+		 * task is scheduled.
+		 */
+ hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
if (mac_type == HCLGE_MAC_ADDR_UC) {
if (is_all_added)
vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
- else
+ else if (hclge_is_umv_space_full(vport, true))
vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
} else {
if (is_all_added)
unsigned long delta = round_jiffies_relative(HZ);
struct hnae3_handle *handle = &hdev->nic;
- if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
u16 out_size = sizeof(vlan_filter);
int err;
- if (!hwdev)
- return -EINVAL;
-
vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
vlan_filter.enable = en;
goto error_pvid;
i40e_vlan_stripping_enable(vsi);
- i40e_vc_reset_vf(vf, true);
- /* During reset the VF got a new VSI, so refresh a pointer. */
- vsi = pf->vsi[vf->lan_vsi_idx];
+
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ i40e_vc_reset_vf(vf, true);
+ /* During reset the VF got a new VSI, so refresh a pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
unsigned int i;
/* Explicitly request stats refresh */
- iavf_schedule_request_stats(adapter);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
}
/**
- * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * iavf_schedule_aq_request - Set the flags and schedule aq request
* @adapter: board private structure
- *
- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
- * request and refresh ethtool stats
+ * @flags: requested aq flags
**/
-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+ adapter->aq_required |= flags;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
list_add_tail(&f->list, &adapter->vlan_filter_list);
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}
clearout:
f = iavf_find_vlan(adapter, vlan);
if (f) {
f->state = IAVF_VLAN_REMOVE;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_clear_fdir_filters(adapter);
iavf_clear_adv_rss_conf(adapter);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
+ !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
}
/* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs)
+ if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
goto out;
spin_unlock(&adapter->stats64_lock);
}
+static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->rx_itr_setting <= 3) ?
+ adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
+}
+
+static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->tx_itr_setting <= 3) ?
+ adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
+}
+
static int igc_ethtool_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
{
struct igc_adapter *adapter = netdev_priv(netdev);
- if (adapter->rx_itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->rx_itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
- if (adapter->tx_itr_setting <= 3)
- ec->tx_coalesce_usecs = adapter->tx_itr_setting;
- else
- ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
- }
+ ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
+ ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
return 0;
}
ec->tx_coalesce_usecs == 2)
return -EINVAL;
- if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
+ ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
return -EINVAL;
+ }
/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
struct igc_ring *ring;
int i, drops;
- if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+ if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
IXGBE_WRITE_FLUSH(hw);
+ /* configure adapter flags only when HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
if (port->rfs_rules[i])
rules[loc++] = i;
}
dma_map_sg_err:
if (si > 0) {
dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
- sglist[0].len[0], DMA_TO_DEVICE);
- sglist[0].len[0] = 0;
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
}
while (si > 1) {
dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
- sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
- sglist[si >> 2].len[si & 3] = 0;
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
si--;
}
tx_buffer->gather = 0;
compl_sg++;
dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
dma_unmap_single(iq->dev,
tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0],
+ tx_buffer->sglist[0].len[3],
DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
#define TX_BUFTYPE_NET_SG 2
#define NUM_TX_BUFTYPES 3
-/* Hardware format for Scatter/Gather list */
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
}
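/* Illustrative sketch, not part of the patch: given the layout documented
 * above, Len 0 lives in the most significant 16 bits of the little-endian
 * 64-bit word, so fragment i uses dma_ptr[i & 3] but len[3 - (i & 3)] within
 * its group of four. A hypothetical helper capturing the mapping that the
 * unmap fixes above switch to:
 */
static inline u16 octep_tx_sg_frag_len(const struct octep_tx_sglist_desc *sg,
				       int i)
{
	return sg[i >> 2].len[3 - (i & 3)];
}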
#define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
- int num_ptrs = 1;
dma_addr_t bufptr;
+ int num_ptrs = 1;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
num_ptrs = 1;
}
}
+ return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
return weight;
}
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
return -ENOMEM;
- }
return 0;
}
static void otx2_pool_refill_task(struct work_struct *work)
{
struct otx2_cq_queue *cq;
- struct otx2_pool *rbpool;
struct refill_work *wrk;
- int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- dma_addr_t bufptr;
+ int qidx;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
qidx = wrk - pfvf->refill_wrk;
cq = &pfvf->qset.cq[qidx];
- rbpool = cq->rbpool;
- free_ptrs = cq->pool_ptrs;
- while (cq->pool_ptrs) {
- if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
- /* Schedule a WQ if we fails to free atleast half of the
- * pointers else enable napi for this RQ.
- */
- if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
- struct delayed_work *dwork;
-
- dwork = &wrk->pool_refill_work;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- } else {
- cq->refill_task_sched = false;
- }
- return;
- }
- pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
- cq->pool_ptrs--;
- }
cq->refill_task_sched = false;
+
+ local_bh_disable();
+ napi_schedule(wrk->napi);
+ local_bh_enable();
}
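/* Illustrative aside, not part of the patch: napi_schedule() only raises
 * NET_RX_SOFTIRQ, and the softirq is processed when bottom halves are
 * re-enabled. Because otx2_pool_refill_task() runs from a workqueue (process
 * context), the local_bh_disable()/local_bh_enable() pair around
 * napi_schedule() ensures the rescheduled NAPI poll actually runs promptly.
 */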
int otx2_config_nix_queues(struct otx2_nic *pfvf)
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
+ struct napi_struct *napi;
};
/* PTPv2 originTimestamp structure */
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
- void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
netif_tx_disable(netdev);
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
otx2_free_hw_resources(pf);
otx2_free_cints(pf, pf->hw.cint_cnt);
otx2_disable_napi(pf);
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
- for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
- cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
- devm_kfree(pf->dev, pf->refill_wrk);
kfree(qset->sq);
kfree(qset->cq);
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq);
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush);
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq,
- struct nix_cqe_rx_s *cqe)
+ struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
return;
skb = napi_get_frags(napi);
struct napi_struct *napi,
struct otx2_cq_queue *cq, int budget)
{
+ bool need_xdp_flush = false;
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
- otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
cq->pend_cqe--;
}
+ if (need_xdp_flush)
+ xdp_do_flush();
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
return processed_cqe;
}
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
dma_addr_t bufptr;
while (cq->pool_ptrs) {
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+
+ return cnt - cq->pool_ptrs;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
+ int filled_cnt = -1;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
}
if (rx_cq && rx_cq->pool_ptrs)
- pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+ filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
otx2_config_irq_coalescing(pfvf, i);
}
- /* Re-enable interrupts */
- otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
- BIT_ULL(0));
+ if (unlikely(!filled_cnt)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ } else {
+ /* Re-enable interrupts */
+ otx2_write64(pfvf,
+ NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
}
return workdone;
}
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq)
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush)
{
unsigned char *hard_start, *data;
int qidx = cq->cq_idx;
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
DMA_FROM_DEVICE);
- if (!err)
+ if (!err) {
+ *need_xdp_flush = true;
return true;
+ }
put_page(page);
break;
default:
int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
+ dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+ likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
- pse_port = 1;
+ pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
- pse_port = 2;
+ pse_port = PSE_GDM2_PORT;
+ else if (dev == eth->netdev[2])
+ pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
if (!newckf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
}
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
if (!newcaf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
}
return duprule;
+
+err:
+ list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+
+ list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+
+ kfree(duprule);
+ return ERR_PTR(-ENOMEM);
}
static void vcap_apply_width(u8 *dst, int width, int bytes)
}
/* Helper function to create a rule of a specific size */
-static struct vcap_rule *
-test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
- u16 priority,
- int id, int size, int expected_addr)
+static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
+ enum vcap_user user, u16 priority,
+ int id, int size, int expected_addr)
{
struct vcap_rule *rule;
struct vcap_rule_internal *ri;
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
- return rule;
+ vcap_free_rule(rule);
}
/* Prepare testing rule deletion */
KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
}
+static void vcap_free_ckf(struct vcap_rule *rule)
+{
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+}
+
static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
{
struct vcap_admin admin = {
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+ vcap_free_ckf(rule);
+}
+
+static void vcap_free_caf(struct vcap_rule *rule)
+{
+ struct vcap_client_actionfield *caf, *next_caf;
+
+ list_for_each_entry_safe(caf, next_caf,
+ &rule->actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
}
static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+ vcap_free_caf(rule);
}
static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
ret = list_empty(&is2_admin.rules);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, ret);
+
+ vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+ rule->cookie, false);
+
vcap_free_rule(rule);
/* Check that the rule has been freed: tricky to access since this
KUNIT_EXPECT_EQ(test, true, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, true, ret);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, id);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
}
static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
++idx;
}
KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
}
static void vcap_api_rule_remove_at_end_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 786, test_init_start);
KUNIT_EXPECT_EQ(test, 8, test_init_count);
KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
}
static struct kunit_case vcap_api_rule_remove_test_cases[] = {
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
return NULL;
}
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
len -= frag_len;
dma_sync_single_for_cpu(dev,
/* fill main descriptor - buf[0] */
desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
}
sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE -
+ buf_info->page_offset));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
struct net_device *ndev = napi->dev;
struct rswitch_private *priv;
struct rswitch_device *rdev;
+ unsigned long flags;
int quota = budget;
rdev = netdev_priv(ndev);
netif_wake_subqueue(ndev, 0);
- napi_complete(napi);
-
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (napi_complete_done(napi, budget - quota)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
out:
return budget - quota;
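/* Illustrative aside, not part of the patch: napi_complete_done() returns
 * false when the NAPI instance is still owned (for example rescheduled or in
 * busy polling), so the data IRQs are only re-enabled, under priv->lock, when
 * the poll has genuinely completed.
 */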
struct rswitch_device *rdev = netdev_priv(ndev);
if (napi_schedule_prep(&rdev->napi)) {
+ spin_lock(&rdev->priv->lock);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock(&rdev->priv->lock);
__napi_schedule(&rdev->napi);
}
}
static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ unsigned long flags;
phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ unsigned long flags;
netif_tx_stop_all_queues(ndev);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
kfree(ts_info);
}
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ spin_lock_init(&priv->lock);
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ spinlock_t lock; /* lock interrupt registers' control */
+
bool etha_no_runtime_change;
bool gwca_halt;
};
if (old) {
/* don't need our new entry */
kfree(ped);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
kfree(encap);
if (pseudo) /* don't need our new pseudo either */
efx_tc_flower_release_encap_match(efx, pseudo);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
/* check old and new em_types are compatible */
switch (old->type) {
case EFX_TC_EM_DIRECT:
if (old) {
/* don't need our new entry */
kfree(rid);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
&rule->linkage,
efx_tc_lhs_rule_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
&conn->linkage,
efx_tc_ct_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded conntrack (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
if (old) {
/* don't need our new entry */
kfree(ct_zone);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
if (old) {
/* don't need our new entry */
kfree(ctr);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
/* don't need our new entry */
put_net_track(neigh->net, &neigh->ns_tracker);
kfree(neigh);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
if (!refcount_inc_not_zero(&old->ref))
return -EAGAIN;
/* existing entry found, ref taken */
if (old) {
/* don't need our new entry */
kfree(encap);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
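/* Illustrative sketch with assumed names (example_entry, example_get_or_insert),
 * not taken from the patches above: the return convention of
 * rhashtable_lookup_get_insert_fast() that the hunks above now honour is
 *   ERR_PTR()  - insertion failed (e.g. -ENOMEM),
 *   non-NULL   - an existing entry with the same key was found,
 *   NULL       - our new entry was inserted.
 * The old plain "if (old)" tests treated an ERR_PTR() as a found entry.
 */
#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct example_entry {
	struct rhash_head linkage;
	refcount_t ref;
};

static struct example_entry *
example_get_or_insert(struct rhashtable *ht, struct example_entry *entry,
		      const struct rhashtable_params params)
{
	struct example_entry *old;

	old = rhashtable_lookup_get_insert_fast(ht, &entry->linkage, params);
	if (IS_ERR(old)) {
		kfree(entry);	/* insertion error, e.g. -ENOMEM */
		return old;
	}
	if (old) {
		kfree(entry);	/* duplicate: drop our unused copy */
		if (!refcount_inc_not_zero(&old->ref))
			return ERR_PTR(-EAGAIN);
		return old;	/* existing entry, reference taken */
	}
	return entry;		/* our entry is now in the table */
}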
u64 tx_tso_frames;
u64 tx_tso_nfrags;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
struct stmmac_rxq_stats {
u64 rx_bytes;
u64 rx_normal_irq_n;
u64 napi_poll;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
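/* Illustrative aside, not part of the patch: the per-queue counters now sit
 * next to each other in arrays inside struct stmmac_extra_stats rather than
 * inside each RX/TX queue structure, so ____cacheline_aligned_in_smp keeps
 * each queue's counters on its own cache line and avoids false sharing
 * between CPUs servicing different queues.
 */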
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ /* per queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
u32 v;
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
if (dir == DMA_DIR_RX)
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
}
dma_addr_t dma_tx_phy;
dma_addr_t tx_tail_addr;
u32 mss;
- struct stmmac_txq_stats txq_stats;
};
struct stmmac_rx_buffer {
unsigned int len;
unsigned int error;
} state;
- struct stmmac_rxq_stats rxq_stats;
};
struct stmmac_channel {
pos = data;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
struct stmmac_txq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
pos = data;
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
struct stmmac_rxq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
pos = j;
for (i = 0; i < rx_queues_count; i++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
struct stmmac_rxq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
data[j++] += snapshot.rx_pkt_n;
data[j++] += snapshot.rx_normal_irq_n;
pos = j;
for (i = 0; i < tx_queues_count; i++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
struct stmmac_txq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
data[j++] += snapshot.tx_pkt_n;
data[j++] += snapshot.tx_normal_irq_n;
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ stmmac_tx_timer_arm(priv, queue);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_packets += tx_packets;
- tx_q->txq_stats.tx_pkt_n += tx_packets;
- tx_q->txq_stats.tx_clean++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_packets += tx_packets;
+ txq_stats->tx_pkt_n += tx_packets;
+ txq_stats->tx_clean++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
priv->xstats.tx_errors += tx_errors;
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+ if (!tx_coal_timer)
+ return;
hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
+ struct stmmac_txq_stats *txq_stats;
int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int i;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
- tx_q->txq_stats.tx_tso_frames++;
- tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
+ txq_stats->tx_tso_frames++;
+ txq_stats->tx_tso_nfrags += nfrags;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n++;
- rx_q->rxq_stats.rx_bytes += len;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n++;
+ rxq_stats->rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_packets += rx_packets;
- rx_q->rxq_stats.rx_bytes += rx_bytes;
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_packets += rx_packets;
+ rxq_stats->rx_bytes += rx_bytes;
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_rx_queue *rx_q;
+ struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
- struct stmmac_rx_queue *rx_q;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_rxq_stats *rxq_stats;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
int q;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
u64 tx_packets;
u64 tx_bytes;
}
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
u64 rx_packets;
u64 rx_bytes;
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].syncp);
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
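Editor's note, not part of the patch above: the per-queue counters that these hunks move into priv->xstats are updated under u64_stats_update_begin_irqsave()/u64_stats_update_end_irqrestore(), which pairs with the fetch/retry reader side of the u64_stats API so 64-bit counters stay consistent on 32-bit machines. A minimal sketch of such a reader, assuming the stmmac_rxq_stats layout visible above (plain u64 counters plus a struct u64_stats_sync syncp); the helper name is hypothetical:

#include <linux/u64_stats_sync.h>

static void stmmac_read_rxq_stats(struct stmmac_rxq_stats *rxq_stats,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* Retry if an update raced with this snapshot. */
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		*packets = rxq_stats->rx_packets;
		*bytes = rxq_stats->rx_bytes;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
}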
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on TI_PRUSS
default TI_PRUSS
help
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
- dev->header_ops = port_dev->header_ops;
+ struct team *team = netdev_priv(dev);
+
+ if (port_dev->type == ARPHRD_ETHER)
+ dev->header_ops = team->header_ops_cache;
+ else
+ dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
static void team_setup(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
- } else if (skb_is_gso_v6(skb)) {
+ } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
- return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
+ if (!budget)
+ return 0;
+
work_done = rx_bottom(tp, budget);
if (work_done < budget) {
netif_carrier_on(peer);
}
+ veth_set_xdp_features(dev);
+
return 0;
}
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
+ nla_total_size(0) + /* IFLA_VXLAN_GBP */
+ nla_total_size(0) + /* IFLA_VXLAN_GPE */
+ nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
0;
}
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
- if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
- u32 crto;
-
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
- if (ret) {
- dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
- ret);
- return ret;
- }
-
- if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
- ctrl->ctrl_config |= NVME_CC_CRIME;
- timeout = NVME_CRTO_CRIMT(crto);
- } else {
- timeout = NVME_CRTO_CRWMT(crto);
- }
- } else {
- timeout = NVME_CAP_TIMEOUT(ctrl->cap);
- }
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+ ctrl->ctrl_config |= NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
if (ret)
return ret;
+ /* CAP value may change after initial CC write */
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (ret)
+ return ret;
+
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto, ready_timeout;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * CRTO should always be greater than or equal to CAP.TO, but some
+ * devices are known to get this wrong. Use the larger of the
+ * two values.
+ */
+ if (ctrl->ctrl_config & NVME_CC_CRIME)
+ ready_timeout = NVME_CRTO_CRIMT(crto);
+ else
+ ready_timeout = NVME_CRTO_CRWMT(crto);
+
+ if (ready_timeout < timeout)
+ dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+ crto, ctrl->cap);
+ else
+ timeout = ready_timeout;
+ }
+
ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
struct request *rq = op->rq;
- if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
return NULL;
return blkcg_get_fc_appid(rq->bio);
}
return 0;
}
-static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+static const struct hwmon_channel_info *const nvme_hwmon_info[] = {
HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
struct nvme_dev *dev;
int ret = -ENOMEM;
- if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, first_memory_node);
-
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return ERR_PTR(-ENOMEM);
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- bvec_set_page(iov, sg_page(sg), sg->length,
+ bvec_set_page(iov, sg_page(sg), iov_len,
sg->offset + sg_offset);
length -= iov_len;
struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
u32 pdir_size; /* bytes, function of IOV Space size */
u32 res_hint; /* next available IOVP -
circular search */
BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
- DBG_RES("%s() size: %d pages_needed %d\n",
+ DBG_RES("%s() size: %zu pages_needed %d\n",
__func__, size, pages_needed);
/*
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
BUG_ON(pages_mapped > BITS_PER_LONG);
- DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
+ DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
__func__, res_idx, pages_mapped);
#ifdef CCIO_COLLECT_STATS
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
register unsigned long pa;
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
unsigned long hint = hint_lookup[(int)direction];
BUG_ON(!dev);
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
- __func__, addr, (long)iovp | offset, size);
+ DBG_RUN("%s() %px -> %#lx size: %zu\n",
+ __func__, addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
return;
}
- DBG_RUN("%s() iovp 0x%lx/%x\n",
+ DBG_RUN("%s() iovp %#lx/%zx\n",
__func__, (long)iova, size);
iova ^= offset; /* clear offset bits */
iova_space_size>>20,
iov_order + PAGE_SHIFT);
- ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
+ ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic("%s() could not allocate I/O Page Table\n", __func__);
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
unsigned int n_mappings = 0;
unsigned long dma_offset = 0, dma_len = 0;
- u64 *pdirp = NULL;
+ __le64 *pdirp = NULL;
/* Horrible hack. For efficiency's sake, dma_sg starts one
* entry below the true start (it is immediately incremented
unsigned long vaddr;
long size;
- DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
- (unsigned long)sg_dma_address(startsg), cnt,
+ DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
+ (unsigned long)sg_dma_address(startsg),
sg_virt(startsg), startsg->length
);
static DEFINE_SPINLOCK(iosapic_lock);
-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
+static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
- __raw_writel(data, addr);
+ __raw_writel((__force u32)data, addr);
}
/*
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
- u32 __iomem *eoi_addr; /* precalculate EOI reg address */
- u32 eoi_data; /* IA64: ? PA: swapped txn_data */
+ __le32 __iomem *eoi_addr; /* precalculate EOI reg address */
+ __le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */
#include <linux/module.h>
#include <asm/ropes.h>
-#include <asm/mckinley.h> /* for proc_mckinley_root */
-#include <asm/runway.h> /* for proc_runway_root */
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#endif
static struct proc_dir_entry *proc_runway_root __ro_after_init;
-struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
- u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
+ __le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
*/
static void
-sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
u64 pa; /* physical address */
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
- u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
+ __le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set.
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
int pide;
ioc = GET_IOC(dev);
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
- DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
+ DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
- DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
+ DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
__func__, ioc->ibase, ioc->imask);
/*
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
__func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
/* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
+ DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n",
i,
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
);
- DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
+ DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n",
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->dev)) {
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
sba_dev->ioc[i].res_map[0] = 0x80;
- sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
+ sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
#endif
/* Third (and last) part of PIRANHA BUG */
int i;
char *version;
void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *root;
-#endif
+ struct proc_dir_entry *root __maybe_unused;
sba_dump_ranges(sba_addr);
hppa_dma_ops = &sba_ops;
-#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
if (!proc_mckinley_root)
proc_create_single("sba_iommu", 0, root, sba_proc_info);
proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
-#endif
return 0;
}
return pcie_ports_native || host->native_aer;
}
+EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
-int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
-static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
tristate "Mellanox BlueField Firmware Boot Control driver"
depends on ARM64
depends on ACPI
+ depends on NET
help
The Mellanox BlueField firmware implements functionality to
request swapping the primary and alternate eMMC boot partition,
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on HWMON
- depends on I2C
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt, perfctl;
+ uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
- MLXBF_PMC_PERFCTL);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
- if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
- MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
- return -EFAULT;
-
- /* Check if the counter is enabled */
-
- if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
- MLXBF_PMC_READ_REG_64, &perfctl))
- return -EFAULT;
-
- if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
- return -EINVAL;
-
- /* Set counter in "read" mode */
- perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
} else
return -EINVAL;
- return sprintf(buf, "0x%llx\n", value);
+ return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
- return sprintf(buf, "No event being monitored\n");
+ return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
- return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
- len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
- events[i].evt_name);
- if (len > PAGE_SIZE)
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
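Editor's note, not part of the patch: sysfs_emit() is the preferred replacement for sprintf() in sysfs show() callbacks because it bounds the output to PAGE_SIZE and sanity-checks that it is writing into a sysfs-provided page. A minimal sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* Format at most one page of output for the "example" attribute. */
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);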
/* Store function for "enable" sysfs files - only for l3cache */
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
struct mlxbf_tmfifo *fifo;
};
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
return len;
}
-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
vring->cur_len = len;
}
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+ /* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
vring->rem_len -= len;
/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
/*
* Make sure the load/store are in order before
/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
},
{
.callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUS VivoBook E410MA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
/* Poll until the SCU status is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- unsigned long end = jiffies + IPC_TIMEOUT;
-
- do {
- u32 status;
-
- status = ipc_read_status(scu);
- if (!(status & IPC_STATUS_BUSY))
- return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ u8 status;
+ int err;
- usleep_range(50, 100);
- } while (time_before(jiffies, end));
+ err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
+ 100, jiffies_to_usecs(IPC_TIMEOUT));
+ if (err)
+ return err;
- return -ETIMEDOUT;
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
}
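Editor's note, not part of the patch: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly evaluates val = op(addr), sleeping roughly sleep_us between reads, and returns 0 once cond becomes true or -ETIMEDOUT after timeout_us; on success val holds the value that satisfied the condition, which busy_loop() above then checks for IPC_STATUS_ERR. A self-contained sketch with hypothetical register and helper names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static u32 example_read_status(void __iomem *reg)
{
	return readl(reg);
}

static int example_wait_ready(void __iomem *reg)
{
	u32 val;

	/* Poll every 50 us, give up after 10 ms. */
	return readx_poll_timeout(example_read_status, reg, val,
				  !(val & BIT(0)), 50, 10000);
}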
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
- return -ETIMEDOUT;
+ wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY)
+ return -ETIMEDOUT;
+
if (status & IPC_STATUS_ERR)
return -EIO;
return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
+static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
+{
+ u8 status;
+
+ if (!scu)
+ scu = ipcdev;
+ if (!scu)
+ return ERR_PTR(-ENODEV);
+
+ status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY) {
+ dev_dbg(&scu->dev, "device is busy\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ return scu;
+}
+
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
u32 count, u32 op, u32 id)
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
for (nc = 0; nc < count; nc++, offset += 2) {
int err;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
- scu = ipcdev;
+
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
return -EINVAL;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
memcpy(inbuf, in, inlen);
{
tpacpi_disable_brightness_delay();
+ mutex_lock(&hotkey_mutex);
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
pr_err("error while attempting to reset the event firmware interface\n");
+ mutex_unlock(&hotkey_mutex);
tpacpi_send_radiosw_update();
tpacpi_input_send_tabletsw();
config POWER_MLXBF
tristate "Mellanox BlueField power handling driver"
- depends on (GPIO_MLXBF2 && ACPI)
+ depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI
help
This driver supports reset or low power mode handling for Mellanox BlueField.
-// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/*
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
return PTR_ERR(regmap);
dev_set_drvdata(&pdev->dev, regmap);
- switch ((enum vexpress_reset_func)match->data) {
+ switch ((uintptr_t)match->data) {
case FUNC_SHUTDOWN:
vexpress_power_off_device = &pdev->dev;
pm_power_off = vexpress_power_off;
config CHARGER_RT5033
tristate "RT5033 battery charger support"
depends on MFD_RT5033
+ depends on EXTCON || !EXTCON
help
This adds support for battery charger in Richtek RT5033 PMIC.
The device supports pre-charge mode, fast charge mode and
static enum power_supply_property ab8500_btemp_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_TEMP,
};
else
val->intval = 1;
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- if (di->bm->bi)
- val->intval = di->bm->bi->technology;
- else
- val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
break;
static const struct power_supply_desc ab8500_btemp_desc = {
.name = "ab8500_btemp",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_btemp_props,
.num_properties = ARRAY_SIZE(ab8500_btemp_props),
.get_property = ab8500_btemp_get_property,
static const struct power_supply_desc ab8500_chargalg_desc = {
.name = "ab8500_chargalg",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_chargalg_props,
.num_properties = ARRAY_SIZE(ab8500_chargalg_props),
.get_property = ab8500_chargalg_get_property,
if (fl_strobe) {
dev_err(priv->dev, "Flash led is still in strobe mode\n");
- return ret;
+ return -EINVAL;
}
/* cfo off */
if (ret)
return ret;
+ /*
+ * Kernel generates KOBJ_REMOVE uevent in device removal path, after
+ * resources have been freed. Exit early to avoid use-after-free.
+ */
+ if (psy->removing)
+ return 0;
+
prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!prop_buf)
return -ENOMEM;
queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
}
+static void rk817_cleanup_node(void *data)
+{
+ struct device_node *node = data;
+
+ of_node_put(node);
+}
+
static int rk817_charger_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
if (!node)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
+ if (ret)
+ return ret;
+
charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
- if (!charger) {
- of_node_put(node);
+ if (!charger)
return -ENOMEM;
- }
charger->rk808 = rk808;
MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rk817-charger");
reinit_completion(&data->aicl_done);
ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500));
- if (ret)
- return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get);
if (ret) {
case POWER_SUPPLY_PROP_USB_TYPE:
return ucs1002_get_usb_type(info, val);
case POWER_SUPPLY_PROP_HEALTH:
- return val->intval = info->health;
+ val->intval = info->health;
+ return 0;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = info->present;
return 0;
sel += rdev->desc->linear_ranges[i].min_sel;
range = rdev->desc->linear_range_selectors_bitfield[i];
- range <<= ffs(rdev->desc->vsel_mask) - 1;
+ range <<= ffs(rdev->desc->vsel_range_mask) - 1;
if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
ret = regmap_update_bits(rdev->regmap,
return -EEXIST;
}
+ err = -EINVAL;
+ if (!sk_is_tcp(sock->sk))
+ goto free_socket;
+
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
phba->hba_debugfs_root,
phba,
&lpfc_debugfs_op_multixripools);
- if (!phba->debug_multixri_pools) {
+ if (IS_ERR(phba->debug_multixri_pools)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0527 Cannot create debugfs multixripools\n");
goto debug_failed;
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_cgn_buffer_op);
- if (!phba->debug_cgn_buffer) {
+ if (IS_ERR(phba->debug_cgn_buffer)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6527 Cannot create debugfs "
"cgn_buffer\n");
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_rx_monitor_op);
- if (!phba->debug_rx_monitor) {
+ if (IS_ERR(phba->debug_rx_monitor)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6528 Cannot create debugfs "
"rx_monitor\n");
debugfs_create_file(name, 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_ras_log);
- if (!phba->debug_ras_log) {
+ if (IS_ERR(phba->debug_ras_log)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6148 Cannot create debugfs"
" ras_log\n");
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_lockstat);
- if (!phba->debug_lockstat) {
+ if (IS_ERR(phba->debug_lockstat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4610 Can't create debugfs lockstat\n");
goto debug_failed;
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_scsistat);
- if (!vport->debug_scsistat) {
+ if (IS_ERR(vport->debug_scsistat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4611 Cannot create debugfs scsistat\n");
goto debug_failed;
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_ioktime);
- if (!vport->debug_ioktime) {
+ if (IS_ERR(vport->debug_ioktime)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0815 Cannot create debugfs ioktime\n");
goto debug_failed;
/* Only 1 thread can drop the initial node reference. If
* another thread has set NLP_DROPPED, this thread is done.
*/
- if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
+ !(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
- spin_lock_irqsave(&ndlp->lock, iflags);
+ return;
}
spin_unlock_irqrestore(&ndlp->lock, iflags);
spin_unlock_irq(&ndlp->lock);
/* On a devloss timeout event, one more put is executed provided the
- * NVME and SCSI rport unregister requests are complete. If the vport
- * is unloading, this extra put is executed by lpfc_drop_node.
+ * NVME and SCSI rport unregister requests are complete.
*/
if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
* nvme_transport perspective. Loss of an rport just means IO cannot
* be sent and recovery is completely up to the initiator.
* For now, the driver just unbinds the DID and port_role so that
- * no further IO can be issued. Changes are planned for later.
- *
- * Notes - the ndlp reference count is not decremented here since
- * since there is no nvme_transport api for devloss. Node ref count
- * is only adjusted in driver unload.
+ * no further IO can be issued.
*/
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
+
+ if (vport->load_flag & FC_UNLOADING) {
+ /* Only 1 thread can drop the initial node
+ * reference. Check if another thread has set
+ * NLP_DROPPED.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ }
}
}
return;
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
bool smp_affinity_enable;
- spinlock_t crashdump_lock;
+ struct mutex crashdump_lock;
struct megasas_register_set __iomem *reg_set;
u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
instance->fw_crash_buffer_offset = val;
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return strlen(buf);
}
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
unsigned long chunk_left_bytes;
unsigned long src_addr;
- unsigned long flags;
u32 buff_offset;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
buff_offset = instance->fw_crash_buffer_offset;
if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
"Firmware crash dump is not available\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return -EINVAL;
}
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return 0;
}
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
(buff_offset % dmachunk);
memcpy(buf, (void *)src_addr, size);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return size;
}
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
instance->fw_crash_state = val;
if ((val == COPIED) || (val == COPY_ERROR)) {
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
megasas_free_host_crash_buffer(instance);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
if (val == COPY_ERROR)
dev_info(&instance->pdev->dev, "application failed to "
"copy Firmware crash dump\n");
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
- spin_lock_init(&instance->crashdump_lock);
+ mutex_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
return ret;
}
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
/**
pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
pm8001_ha->chip->n_phy);
- /* Setup Interrupt */
- rc = pm8001_setup_irq(pm8001_ha);
- if (rc) {
- pm8001_dbg(pm8001_ha, FAIL,
- "pm8001_setup_irq failed [ret: %d]\n", rc);
- goto err_out;
- }
/* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc)
}
#endif
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
-{
- struct pci_dev *pdev;
-
- pdev = pm8001_ha->pdev;
-
-#ifdef PM8001_USE_MSIX
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
- return pm8001_setup_msix(pm8001_ha);
- pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-#endif
- return 0;
-}
-
/**
* pm8001_request_irq - register interrupt
* @pm8001_ha: our ha struct.
*/
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = pm8001_ha->pdev;
+#ifdef PM8001_USE_MSIX
int rc;
- pdev = pm8001_ha->pdev;
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ rc = pm8001_setup_msix(pm8001_ha);
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm8001_setup_irq failed [ret: %d]\n", rc);
+ return rc;
+ }
-#ifdef PM8001_USE_MSIX
- if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_request_msix(pm8001_ha);
- else {
- pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
- goto intx;
+ if (pdev->msix_cap && pci_msi_enabled())
+ return pm8001_request_msix(pm8001_ha);
}
+
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
#endif
-intx:
/* initialize the INT-X interrupt */
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
- rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
- return rc;
+
+ return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
+ IRQF_SHARED, pm8001_ha->name,
+ SHOST_TO_SAS_HA(pm8001_ha->shost));
}
/**
(struct set_ctrl_cfg_resp *)(piomb + 4);
u32 status = le32_to_cpu(pPayload->status);
u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+ u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, MSG,
"SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
status, err_qlfr_pgcd);
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x4);
- if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
outsl(ppb + 4, buffer, len >> 2);
- else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
outsw(ppb + 4, buffer, len >> 1);
else
outsb(ppb + 4, buffer, len);
goto drop_rdata_kref;
}
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
"io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
io_req->xid, io_req->sc_cmd);
rc = 1;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
goto drop_rdata_kref;
}
+ /* Set the command type to abort */
+ io_req->cmd_type = QEDF_ABTS;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
kref_get(&io_req->refcount);
xid = io_req->xid;
qedf->control_requests++;
qedf->packet_aborts++;
- /* Set the command type to abort */
- io_req->cmd_type = QEDF_ABTS;
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
refcount, fcport, fcport->rdata->ids.port_id);
/* Cleanup cmds re-use the same TID as the original I/O */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
io_req->cmd_type = QEDF_CLEANUP;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
init_completion(&io_req->cleanup_done);
struct qedf_ioreq *io_req;
struct qedf_rport *fcport;
u32 comp_type;
+ u8 io_comp_type;
+ unsigned long flags;
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
FCOE_CQE_CQE_TYPE_MASK;
return;
}
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ io_comp_type = io_req->cmd_type;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
switch (comp_type) {
case FCOE_GOOD_COMPLETION_CQE_TYPE:
atomic_inc(&fcport->free_sqes);
- switch (io_req->cmd_type) {
+ switch (io_comp_type) {
case QEDF_SCSI_CMD:
qedf_scsi_completion(qedf, cqe, io_req);
break;
sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
- if (!fp->dfs_rport_dir)
+ if (IS_ERR(fp->dfs_rport_dir))
return;
if (NVME_TARGET(vha->hw, fp))
debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
- if (!ha->tgt.dfs_naqp) {
+ if (IS_ERR(ha->tgt.dfs_naqp)) {
ql_log(ql_log_warn, vha, 0xd011,
"Unable to create debugFS naqp node.\n");
goto out;
}
}
vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
- if (!vha->dfs_rport_root) {
+ if (IS_ERR(vha->dfs_rport_root)) {
ql_log(ql_log_warn, vha, 0xd012,
"Unable to create debugFS rports node.\n");
goto out;
static inline struct qla_qpair *
qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
{
- int cpuid = smp_processor_id();
+ int cpuid = raw_smp_processor_id();
if (qpair->cpuid != cpuid &&
ha->qp_cpu_map[cpuid]) {
if (!ha->flags.fw_started)
return;
- if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+ if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
rsp->qpair->rcv_intr = 1;
if (!rsp->qpair->cpu_mapped)
}
ha = qpair->hw;
- queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+ queue_work(ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+ queue_work(ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
nvme->u.nvme.dl = 0;
nvme->u.nvme.timeout_sec = 0;
nvme->u.nvme.cmd_dma = fd_resp->rspdma;
- nvme->u.nvme.cmd_len = fd_resp->rsplen;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
nvme->u.nvme.rsp_len = 0;
nvme->u.nvme.rsp_dma = 0;
nvme->u.nvme.exchange_address = uctx->exchange_address;
nvme->u.nvme.nport_handle = uctx->nport_handle;
nvme->u.nvme.ox_id = uctx->ox_id;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+ fd_resp->rsplen, DMA_TO_DEVICE);
ql_dbg(ql_dbg_unsol, vha, 0x2122,
"Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
- nvme->u.nvme.cmd_len = fd->rqstlen;
- nvme->u.nvme.rsp_len = fd->rsplen;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+ nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
+ fd->rqstlen, DMA_TO_DEVICE);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
} else if (ha->msix_count) {
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
- queue_work_on(smp_processor_id(), qla_tgt_wq,
- &cmd->work);
+ queue_work(qla_tgt_wq, &cmd->work);
else
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
&cmd->work);
cmd->trc_flags |= TRC_CMD_DONE;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
- queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
cmd->trc_flags |= TRC_DATA_IN;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
- queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
bool cdl_supported;
unsigned char *buf;
+ /*
+ * Support for CDL was defined in SPC-5. Ignore devices reporting a
+ * lower SPC version. This also avoids problems with old drives choking
+ * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
+ * service action specified, as done in scsi_cdl_check_cmd().
+ */
+ if (sdev->scsi_level < SCSI_SPC_5) {
+ sdev->cdl_supported = 0;
+ return;
+ }
+
buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
if (!buf) {
sdev->cdl_supported = 0;
* device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
* non-zero LUNs can be scanned.
*/
- sdev->scsi_level = inq_result[2] & 0x07;
+ sdev->scsi_level = inq_result[2] & 0x0f;
if (sdev->scsi_level >= 2 ||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++;
}
EXPORT_SYMBOL(scsi_add_device);
-void scsi_rescan_device(struct scsi_device *sdev)
+int scsi_rescan_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
device_lock(dev);
+ /*
+ * Bail out if the device is not running. Otherwise, the rescan may
+ * block waiting for commands to be executed, with us holding the
+ * device lock. This can result in a potential deadlock in the power
+ * management core code when system resume is on-going.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
scsi_attach_vpd(sdev);
scsi_cdl_check(sdev);
drv->rescan(dev);
module_put(dev->driver->owner);
}
+
+unlock:
device_unlock(dev);
+
+ return ret;
}
EXPORT_SYMBOL(scsi_rescan_device);
}
static ssize_t
-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+manage_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- return sprintf(buf, "%u\n", sdp->manage_start_stop);
+ return sysfs_emit(buf, "%u\n",
+ sdp->manage_system_start_stop &&
+ sdp->manage_runtime_start_stop);
}
+static DEVICE_ATTR_RO(manage_start_stop);
static ssize_t
-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+manage_system_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
+}
+
+static ssize_t
+manage_system_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
if (kstrtobool(buf, &v))
return -EINVAL;
- sdp->manage_start_stop = v;
+ sdp->manage_system_start_stop = v;
return count;
}
-static DEVICE_ATTR_RW(manage_start_stop);
+static DEVICE_ATTR_RW(manage_system_start_stop);
+
+static ssize_t
+manage_runtime_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
+}
+
+static ssize_t
+manage_runtime_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_runtime_start_stop = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_runtime_start_stop);
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
&dev_attr_FUA.attr,
&dev_attr_allow_restart.attr,
&dev_attr_manage_start_stop.attr,
+ &dev_attr_manage_system_start_stop.attr,
+ &dev_attr_manage_runtime_start_stop.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
- sd_shutdown(dev);
+ if (!sdkp->suspended)
+ sd_shutdown(dev);
put_disk(sdkp->disk);
return 0;
sd_sync_cache(sdkp, NULL);
}
- if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
+ if (system_state != SYSTEM_RESTART &&
+ sdkp->device->manage_system_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
}
-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
+{
+ return (sdev->manage_system_start_stop && !runtime) ||
+ (sdev->manage_runtime_start_stop && runtime);
+}
+
+static int sd_suspend_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
struct scsi_sense_hdr sshdr;
}
}
- if (sdkp->device->manage_start_stop) {
+ if (sd_do_start_stop(sdkp->device, runtime)) {
if (!sdkp->device->silence_suspend)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
- if (ignore_stop_errors)
+ if (!runtime)
ret = 0;
}
+ if (!ret)
+ sdkp->suspended = true;
+
return ret;
}
if (pm_runtime_suspended(dev))
return 0;
- return sd_suspend_common(dev, true);
+ return sd_suspend_common(dev, false);
}
static int sd_suspend_runtime(struct device *dev)
{
- return sd_suspend_common(dev, false);
+ return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev)
+static int sd_resume(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
- if (!sdkp->device->manage_start_stop)
+ if (!sd_do_start_stop(sdkp->device, runtime)) {
+ sdkp->suspended = false;
return 0;
+ }
if (!sdkp->device->no_start_on_resume) {
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
}
- if (!ret)
+ if (!ret) {
opal_unlock_from_suspend(sdkp->opal_dev);
+ sdkp->suspended = false;
+ }
+
return ret;
}
if (pm_runtime_suspended(dev))
return 0;
- return sd_resume(dev);
+ return sd_resume(dev, false);
}
static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev);
+ return sd_resume(dev, true);
}
static const struct dev_pm_ops sd_pm_ops = {
u8 provisioning_mode;
u8 zeroing_mode;
u8 nr_actuators; /* Number of actuators */
+ bool suspended; /* Disk is suspended (stopped) */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
{
void __iomem *ocotp_base;
struct device_node *np;
+ struct clk *clk;
u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
IMX8MP_OCOTP_UID_OFFSET : 0;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
+ return;
+ }
+
+ clk_prepare_enable(clk);
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
}
config LOONGSON2_PM
bool "Loongson-2 SoC Power Management Controller Driver"
depends on LOONGARCH && OF
+ depends on INPUT=y
help
The Loongson-2's power management controller is ACPI-based and supports ACPI
S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4 (Suspend To
if (matches->svr == (svr & matches->mask))
return matches;
matches++;
- };
+ }
return NULL;
}
{
struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct loongson2_soc_die_attr *soc_die;
const char *machine;
u32 svr;
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = ioremap(res->start, res->end - res->start + 1);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
#include <linux/input.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
+#include <linux/of_platform.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
if (loongson_sysconf.suspend_addr)
suspend_set_ops(&loongson2_suspend_ops);
+ /* Populate children */
+ retval = devm_of_platform_populate(dev);
+ if (retval)
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
return 0;
}
static const struct of_device_id loongson2_pm_match[] = {
{ .compatible = "loongson,ls2k0500-pmc", },
- { .compatible = "loongson,ls2k1000-pmc", },
{},
};
ret = devm_spi_register_controller(priv->dev, priv->ctlr);
if (ret) {
- pm_runtime_disable(priv->dev);
dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
}
return ret;
}
- return write_len;
+ return 0;
}
static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (spi_imx->count >= 512)
ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= (spi_imx->count*8 - 1)
+ ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
base + FSPI_AHBCR);
+ /* Reset the FLSHxCR1 registers. */
+ reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
+ fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+
/* AHB Read - Set lut sequence ID for all CS. */
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
* @fifo_size: size of the embedded fifo in bytes
* @cur_midi: master inter-data idleness in ns
* @cur_speed: speed configured in Hz
+ * @cur_half_period: time of a half bit in us
* @cur_bpw: number of bits in a single SPI data frame
* @cur_fthlv: fifo threshold level (data frames in a single data packet)
* @cur_comm: SPI communication mode
unsigned int cur_midi;
unsigned int cur_speed;
+ unsigned int cur_half_period;
unsigned int cur_bpw;
unsigned int cur_fthlv;
unsigned int cur_comm;
spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+ spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
+
return mbrdiv - 1;
}
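Editor's note, not part of the patch: as a worked example of the half-period computation added above, at cur_speed = 100 kHz the value is DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * 100000) = 5 us, so the udelay() added in the disable path below waits one half bit time before the DMA channels are torn down; at 10 MHz the division rounds to 0 and the "if (spi->cur_half_period)" check skips the delay entirely.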
return;
}
+ /* Add a delay to make sure that transmission is ended. */
+ if (spi->cur_half_period)
+ udelay(spi->cur_half_period);
+
if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
return 0;
clk_dis_all:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
{
struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_VENDOR_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_VENDOR_LEN) {
+ if (len < 0 || len > INQUIRY_VENDOR_LEN) {
pr_err("Emulated T10 Vendor Identification exceeds"
" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
"\n");
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_MODEL_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_MODEL_LEN) {
+ if (len < 0 || len > INQUIRY_MODEL_LEN) {
pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
__stringify(INQUIRY_MODEL_LEN)
"\n");
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_REVISION_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_REVISION_LEN) {
+ if (len < 0 || len > INQUIRY_REVISION_LEN) {
pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
__stringify(INQUIRY_REVISION_LEN)
"\n");
percpu_ref_put(&cmd_cnt->refcnt);
percpu_ref_exit(&cmd_cnt->refcnt);
+ kfree(cmd_cnt);
}
EXPORT_SYMBOL_GPL(target_free_cmd_counter);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
-int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
-int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
struct tee_shm_pool *pool;
};
-int tee_shm_init(void);
-
int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
struct thermal_trip trip;
/* Ignore disabled trip points */
- if (test_bit(trip_id, &tz->trips_disabled) ||
- trip.temperature == THERMAL_TEMP_INVALID)
+ if (test_bit(trip_id, &tz->trips_disabled))
return;
__thermal_zone_get_trip(tz, trip_id, &trip);
+ if (trip.temperature == THERMAL_TEMP_INVALID)
+ return;
+
if (tz->last_temperature != THERMAL_TEMP_INVALID) {
if (tz->last_temperature < trip.temperature &&
tz->temperature >= trip.temperature)
*/
for_each_child_of_node(trips, t) {
- if (t == trip)
+ if (t == trip) {
+ of_node_put(t);
goto out;
+ }
i++;
}
for_each_child_of_node(cm_np, child) {
ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
of_node_put(cm_np);
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- if (kstrtoint(buf, 10, &trip.hysteresis))
- return -EINVAL;
-
mutex_lock(&tz->lock);
if (!device_is_registered(dev)) {
ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
goto unlock;
-
+
+ ret = kstrtoint(buf, 10, &trip.hysteresis);
+ if (ret)
+ goto unlock;
+
ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
}
static int __ti_thermal_get_trend(struct thermal_zone_device *tz,
- struct thermal_trip *trip, enum thermal_trend *trend)
+ const struct thermal_trip *trip,
+ enum thermal_trend *trend)
{
struct ti_thermal_data *data = thermal_zone_device_priv(tz);
struct ti_bandgap *bgp;
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
- if (gsm->dlci[i]) {
+ if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
- gsm->dlci[i] = NULL;
- }
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
- if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ struct irq_data *d;
+
+ d = irq_get_irq_data(port->irq);
+ if (d && irqd_is_wakeup_set(d))
pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
+#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
*/
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
- return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+ u32 val;
+ int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+ 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ REG_CONTROLLER_STATUS);
+ return ret == 0 ? true : false;
}
/**
bool completion)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
- lockdep_assert_held(hba->host->host_lock);
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
- unsigned long flags;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
wmb();
reenable_intr = true;
}
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
void ucsi_debugfs_unregister(struct ucsi *ucsi)
{
+ if (IS_ERR_OR_NULL(ucsi) || !ucsi->debugfs)
+ return;
+
debugfs_remove_recursive(ucsi->debugfs->dentry);
kfree(ucsi->debugfs);
}
out_err:
while (--i >= 0)
mdev_type_remove(parent->types[i]);
- return 0;
+ kset_unregister(parent->mdev_types_kset);
+ return ret;
}
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
config PDS_VFIO_PCI
tristate "VFIO support for PDS PCI devices"
- depends on PDS_CORE
+ depends on PDS_CORE && PCI_IOV
select VFIO_PCI_CORE
help
This provides generic PCI support for PDS devices using the VFIO
pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
dev_dbg(&pdev->dev,
"%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
- __func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
+ __func__, pci_dev_id(pci_physfn(pdev)), pci_id, vf_id,
pci_domain_nr(pdev->bus), pds_vfio);
return 0;
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
depends on FB_CORE && !UML
+ default DRM_FBDEV_EMULATION
select VT_HW_CONSOLE_BINDING
select CRC32
select FONT_SUPPORT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
- depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_IOMEM_HELPERS
help
config FB_DEVICE
bool "Provide legacy /dev/fb* device"
depends on FB_CORE
- default y
+ default FB
help
Say Y here if you want the legacy /dev/fb* device file and
interfaces within sysfs and procfs. It is only required if you
.driver = {
.name = "ds2482",
},
- .probe_new = ds2482_probe,
+ .probe = ds2482_probe,
.remove = ds2482_remove,
.id_table = ds2482_id,
};
generic_handle_irq(irq);
}
-static int __xen_evtchn_do_upcall(void)
+int xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
return ret;
}
-
-void xen_evtchn_do_upcall(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- __xen_evtchn_do_upcall();
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-int xen_hvm_evtchn_do_upcall(void)
-{
- return __xen_evtchn_do_upcall();
-}
-EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
+EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{
- return xen_hvm_evtchn_do_upcall();
+ return xen_evtchn_do_upcall();
}
static int xen_allocate_irq(struct pci_dev *pdev)
struct kioctx_table {
struct rcu_head rcu;
unsigned nr;
- struct kioctx __rcu *table[];
+ struct kioctx __rcu *table[] __counted_by(nr);
};
struct kioctx_cpu {
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
+ SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
- set_personality(PER_LINUX_FDPIC);
- else
- set_personality(PER_LINUX);
+ current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
continue to be mountable and usable by newer kernels.
For more information, please see the web pages at
- http://btrfs.wiki.kernel.org.
+ https://btrfs.readthedocs.io
To compile this file system support as a module, choose M here. The
module will be called btrfs.
btrfs_mark_buffer_dirty(leaf);
fail:
btrfs_release_path(path);
- /* We didn't update the block group item, need to revert @commit_used. */
- if (ret < 0) {
+ /*
+ * We didn't update the block group item, need to revert commit_used
+ * unless the block group item didn't exist yet - this is to prevent a
+ * race with a concurrent insertion of the block group item, with
+ * insert_block_group_item(), that happened just after we attempted to
+ * update. In that case we would reset commit_used to 0 just after the
+ * insertion set it to a value greater than 0 - if the block group later
+ * ends up with 0 used bytes, we would incorrectly skip its update.
+ */
+ if (ret < 0 && ret != -ENOENT) {
spin_lock(&cache->lock);
cache->commit_used = old_commit_used;
spin_unlock(&cache->lock);
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
if (RB_EMPTY_NODE(&delayed_item->rb_node))
return;
- delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+ /* If it's in a rbtree, then we need to have delayed node locked. */
+ lockdep_assert_held(&delayed_node->mutex);
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
- root = &delayed_item->delayed_node->ins_root;
+ root = &delayed_node->ins_root;
else
- root = &delayed_item->delayed_node->del_root;
+ root = &delayed_node->del_root;
rb_erase_cached(&delayed_item->rb_node, root);
RB_CLEAR_NODE(&delayed_item->rb_node);
- delayed_item->delayed_node->count--;
+ delayed_node->count--;
finish_one_item(delayed_root);
}
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
if (ret) {
- btrfs_release_delayed_node(curr_node);
- curr_node = NULL;
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
curr_node = btrfs_next_delayed_node(curr_node);
+ /*
+ * See the comment below about releasing path before releasing
+ * node. If the commit of delayed items was successful the path
+ * should always be released, but in case of an error, it may
+ * point to locked extent buffers (a leaf at the very least).
+ */
+ ASSERT(path->nodes[0] == NULL);
btrfs_release_delayed_node(prev_node);
}
+ /*
+ * Release the path to avoid a potential deadlock and lockdep splat when
+ * releasing the delayed node, as that requires taking the delayed node's
+ * mutex. If another task starts running delayed items before we take
+ * the mutex, it will first lock the mutex and then it may try to lock
+ * the same btree path (leaf).
+ */
+ btrfs_free_path(path);
+
if (curr_node)
btrfs_release_delayed_node(curr_node);
- btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
-/* Will return 0 or -ENOMEM */
+static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ return;
+
+ /*
+ * Adding the new dir index item does not require touching another
+ * leaf, so we can release 1 unit of metadata that was previously
+ * reserved when starting the transaction. This applies only to
+ * the case where we had a transaction start and excludes the
+ * transaction join case (when replaying log trees).
+ */
+ trace_btrfs_space_reservation(fs_info, "transaction",
+ trans->transid, bytes, 0);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+ ASSERT(trans->bytes_reserved >= bytes);
+ trans->bytes_reserved -= bytes;
+}
+
+/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
mutex_lock(&delayed_node->mutex);
+ /*
+ * First attempt to insert the delayed item. This is to make the error
+ * handling path simpler in case we fail (-EEXIST). There's no risk of
+ * any other task coming in and running the delayed item before we do
+ * the metadata space reservation below, because we are holding the
+ * delayed node's mutex and that mutex must also be locked before the
+ * node's delayed items can be run.
+ */
+ ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+ if (unlikely(ret)) {
+ btrfs_err(trans->fs_info,
+"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
+ name_len, name, index, btrfs_root_id(delayed_node->root),
+ delayed_node->inode_id, dir->index_cnt,
+ delayed_node->index_cnt, ret);
+ btrfs_release_delayed_item(delayed_item);
+ btrfs_release_dir_index_item_space(trans);
+ mutex_unlock(&delayed_node->mutex);
+ goto release_node;
+ }
+
if (delayed_node->index_item_leaves == 0 ||
delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
delayed_node->curr_index_batch_size = data_len;
* impossible.
*/
if (WARN_ON(ret)) {
- mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_item(delayed_item);
+ mutex_unlock(&delayed_node->mutex);
goto release_node;
}
delayed_node->index_item_leaves++;
- } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
- const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
-
- /*
- * Adding the new dir index item does not require touching another
- * leaf, so we can release 1 unit of metadata that was previously
- * reserved when starting the transaction. This applies only to
- * the case where we had a transaction start and excludes the
- * transaction join case (when replaying log trees).
- */
- trace_btrfs_space_reservation(fs_info, "transaction",
- trans->transid, bytes, 0);
- btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
- ASSERT(trans->bytes_reserved >= bytes);
- trans->bytes_reserved -= bytes;
- }
-
- ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
- if (unlikely(ret)) {
- btrfs_err(trans->fs_info,
- "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->root_key.objectid,
- delayed_node->inode_id, ret);
- BUG();
+ } else {
+ btrfs_release_dir_index_item_space(trans);
}
mutex_unlock(&delayed_node->mutex);
* Transfer bytes to our delayed refs rsv.
*
* @fs_info: the filesystem
- * @src: source block rsv to transfer from
* @num_bytes: number of bytes to transfer
*
- * This transfers up to the num_bytes amount from the src rsv to the
+ * This transfers up to the num_bytes amount, previously reserved, to the
* delayed_refs_rsv. Any extra bytes are returned to the space info.
*/
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
u64 num_bytes)
{
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
u64 to_free = 0;
- spin_lock(&src->lock);
- src->reserved -= num_bytes;
- src->size -= num_bytes;
- spin_unlock(&src->lock);
-
spin_lock(&delayed_refs_rsv->lock);
if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
u64 delta = delayed_refs_rsv->size -
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
+ u64 refilled_bytes;
+ u64 to_free;
int ret = -ENOSPC;
spin_lock(&block_rsv->lock);
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
if (ret)
return ret;
- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
- trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
- 0, num_bytes, 1);
+
+ /*
+ * We may have raced with someone else, so check again if the block
+ * reserve is still not full and release any excess space.
+ */
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size) {
+ u64 needed = block_rsv->size - block_rsv->reserved;
+
+ if (num_bytes >= needed) {
+ block_rsv->reserved += needed;
+ block_rsv->full = true;
+ to_free = num_bytes - needed;
+ refilled_bytes = needed;
+ } else {
+ block_rsv->reserved += num_bytes;
+ to_free = 0;
+ refilled_bytes = num_bytes;
+ }
+ } else {
+ to_free = num_bytes;
+ refilled_bytes = 0;
+ }
+ spin_unlock(&block_rsv->lock);
+
+ if (to_free > 0)
+ btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
+ to_free);
+
+ if (refilled_bytes > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
+ refilled_bytes, 1);
return 0;
}
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
struct folio *folio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
+ struct btrfs_subpage_info *spi = fs_info->subpage_info;
struct btrfs_subpage *subpage;
struct extent_buffer *eb;
int cur_bit = 0;
btrfs_assert_tree_write_locked(eb);
return filemap_dirty_folio(mapping, folio);
}
+
+ ASSERT(spi);
subpage = folio_get_private(folio);
- ASSERT(subpage->dirty_bitmap);
- while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
+ for (cur_bit = spi->dirty_offset;
+ cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
+ cur_bit++) {
unsigned long flags;
u64 cur;
- u16 tmp = (1 << cur_bit);
spin_lock_irqsave(&subpage->lock, flags);
- if (!(tmp & subpage->dirty_bitmap)) {
+ if (!test_bit(cur_bit, subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- cur_bit++;
continue;
}
spin_unlock_irqrestore(&subpage->lock, flags);
btrfs_assert_tree_write_locked(eb);
free_extent_buffer(eb);
- cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
+ cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
}
return filemap_dirty_folio(mapping, folio);
}
delta = ktime_get_seconds() - cur->start_time;
if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
- cur->state < TRANS_STATE_COMMIT_START &&
+ cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
delay -= msecs_to_jiffies((delta - 1) * 1000);
btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
- btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
BTRFS_LOCKDEP_TRANS_UNBLOCKED);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
while (!list_empty(&fs_info->trans_list)) {
t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
- if (t->state >= TRANS_STATE_COMMIT_START) {
+ if (t->state >= TRANS_STATE_COMMIT_PREP) {
refcount_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(fs_info, t->transid);
btrfs_release_path(path);
/* now insert the actual backref */
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- BUG_ON(refs_to_add != 1);
+ if (owner < BTRFS_FIRST_FREE_OBJECTID)
ret = insert_tree_block_ref(trans, path, bytenr, parent,
root_objectid);
- } else {
+ else
ret = insert_extent_data_ref(trans, path, bytenr, parent,
root_objectid, owner, offset,
refs_to_add);
- }
+
if (ret)
btrfs_abort_transaction(trans, ret);
out:
goto again;
}
} else {
- err = -EIO;
+ err = -EUCLEAN;
+ btrfs_err(fs_info,
+ "missing extent item for extent %llu num_bytes %llu level %d",
+ head->bytenr, head->num_bytes, extent_op->level);
goto out;
}
}
parent = ref->parent;
ref_root = ref->root;
- if (node->ref_mod != 1) {
+ if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info,
- "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
- return -EIO;
+ return -EUCLEAN;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
bvec->bv_offset, bvec->bv_len);
btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
- if (error) {
- btrfs_page_clear_uptodate(fs_info, page, start, len);
+ if (error)
mapping_set_error(page->mapping, error);
- }
btrfs_page_clear_writeback(fs_info, page, start, len);
}
if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
PAGE_SIZE, !ret);
- btrfs_page_clear_uptodate(btrfs_sb(inode->i_sb), page,
- page_start, PAGE_SIZE);
mapping_set_error(page->mapping, ret);
}
unlock_page(page);
struct page *page = bvec->bv_page;
u32 len = bvec->bv_len;
- if (!uptodate)
- btrfs_page_clear_uptodate(fs_info, page, start, len);
btrfs_page_clear_writeback(fs_info, page, start, len);
bio_offset += len;
}
if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
cur, cur_len, !ret);
- btrfs_page_clear_uptodate(fs_info, page, cur, cur_len);
mapping_set_error(page->mapping, ret);
}
btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
char *dst = (char *)dstv;
unsigned long i = get_eb_page_index(start);
- if (check_eb_range(eb, start, len))
+ if (check_eb_range(eb, start, len)) {
+ /*
+ * Invalid range hit, reset the memory, so callers won't get
+ * some random garbage for their uninitialized memory.
+ */
+ memset(dstv, 0, len);
return;
+ }
offset = get_eb_offset_in_page(eb, start);
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
+static void update_time_for_write(struct inode *inode)
+{
+ struct timespec64 now, ctime;
+
+ if (IS_NOCMTIME(inode))
+ return;
+
+ now = current_time(inode);
+ if (!timespec64_equal(&inode->i_mtime, &now))
+ inode->i_mtime = now;
+
+ ctime = inode_get_ctime(inode);
+ if (!timespec64_equal(&ctime, &now))
+ inode_set_ctime_to_ts(inode, now);
+
+ if (IS_I_VERSION(inode))
+ inode_inc_iversion(inode);
+}
+
static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
size_t count)
{
* need to start yet another transaction to update the inode as we will
* update the inode when we finish writing whatever data we write.
*/
- if (!IS_NOCMTIME(inode)) {
- inode->i_mtime = inode_set_ctime_current(inode);
- inode_inc_iversion(inode);
- }
+ update_time_for_write(inode);
start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
if (iocb->ki_flags & IOCB_NOWAIT)
ilock_flags |= BTRFS_ILOCK_TRY;
- /* If the write DIO is within EOF, use a shared lock */
- if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
+ /*
+ * If the write DIO is within EOF, use a shared lock and also only if
+ * security bits will likely not be dropped by file_remove_privs() called
+ * from btrfs_write_check(). Either will need to be rechecked after the
+ * lock was acquired.
+ */
+ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
ilock_flags |= BTRFS_ILOCK_SHARED;
relock:
if (err < 0)
return err;
+ /* Shared lock cannot be used with security bits set. */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
+
err = generic_write_checks(iocb, from);
if (err <= 0) {
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
btrfs_mark_ordered_io_finished(inode, locked_page,
page_start, PAGE_SIZE,
!ret);
- btrfs_page_clear_uptodate(inode->root->fs_info,
- locked_page, page_start,
- PAGE_SIZE);
mapping_set_error(locked_page->mapping, ret);
unlock_page(locked_page);
}
mapping_set_error(page->mapping, ret);
btrfs_mark_ordered_io_finished(inode, page, page_start,
PAGE_SIZE, !ret);
- btrfs_page_clear_uptodate(fs_info, page, page_start, PAGE_SIZE);
clear_page_dirty_for_io(page);
}
btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
- if (dir->index_cnt == (u64)-1) {
- int ret;
+ int ret = 0;
+ btrfs_inode_lock(dir, 0);
+ if (dir->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
- return ret;
+ goto out;
}
}
- *index = dir->index_cnt;
+ /* index_cnt is the index number of next new entry, so decrement it. */
+ *index = dir->index_cnt - 1;
+out:
+ btrfs_inode_unlock(dir, 0);
- return 0;
+ return ret;
}
/*
return 0;
}
+static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct btrfs_file_private *private = file->private_data;
+ int ret;
+
+ ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
+ &private->last_index);
+ if (ret)
+ return ret;
+
+ return generic_file_llseek(file, offset, whence);
+}
+
struct dir_entry {
u64 ino;
u64 offset;
};
static const struct file_operations btrfs_dir_file_operations = {
- .llseek = generic_file_llseek,
+ .llseek = btrfs_dir_llseek,
.read = generic_read_dir,
.iterate_shared = btrfs_real_readdir,
.open = btrfs_opendir,
goto out_put;
}
+ /*
+ * We don't need the path anymore, so release it and
+ * avoid deadlocks and lockdep warnings in case
+ * btrfs_iget() needs to lookup the inode from its root
+ * btree and lock the same leaf.
+ */
+ btrfs_release_path(path);
temp_inode = btrfs_iget(sb, key2.objectid, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
}
- btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
};
enum btrfs_lockdep_trans_states {
- BTRFS_LOCKDEP_TRANS_COMMIT_START,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
BTRFS_LOCKDEP_TRANS_UNBLOCKED,
BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
BTRFS_LOCKDEP_TRANS_COMPLETED,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
- ASSERT(trans);
+ ASSERT(trans || BTRFS_FS_ERROR(fs_info));
if (trans) {
if (atomic_dec_and_test(&trans->pending_ordered))
wake_up(&trans->pending_wait);
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
- total_free_meta - thresh < block_rsv->size)
+ (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;
.name = "btrfs",
.mount = btrfs_mount,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
static struct file_system_type btrfs_root_fs_type = {
.name = "btrfs",
.mount = btrfs_mount_root,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
- FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("btrfs");
* | Call btrfs_commit_transaction() on any trans handle attached to
* | transaction N
* V
- * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * Transaction N [[TRANS_STATE_COMMIT_PREP]]
+ * |
+ * | If there are simultaneous calls to btrfs_commit_transaction() one will win
+ * | the race and the rest will wait for the winner to commit the transaction.
+ * |
+ * | The winner will wait for previous running transaction to completely finish
+ * | if there is one.
* |
- * | Will wait for previous running transaction to completely finish if there
- * | is one
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
* |
- * | Then one of the following happes:
+ * | Then one of the following happens:
* | - Wait for all other trans handle holders to release.
* | The btrfs_commit_transaction() caller will do the commit work.
* | - Wait for current transaction to be committed by others.
*/
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
+ [TRANS_STATE_COMMIT_PREP] = 0U,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
reloc_reserved = true;
}
- ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
if (ret)
goto reserve_fail;
if (delayed_refs_bytes) {
- btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
- delayed_refs_bytes);
+ btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
num_bytes -= delayed_refs_bytes;
}
+ btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
if (rsv->space_info->force_alloc)
do_chunk_alloc = true;
* Wait for the current transaction commit to start and block
* subsequent transaction joins
*/
- btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
wait_event(fs_info->transaction_blocked_wait,
cur_trans->state >= TRANS_STATE_COMMIT_START ||
TRANS_ABORTED(cur_trans));
return;
lockdep_assert_held(&trans->fs_info->trans_lock);
- ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
ktime_t interval;
ASSERT(refcount_read(&trans->use_count) == 1);
- btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
}
spin_lock(&fs_info->trans_lock);
- if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
add_pending_snapshot(trans);
want_state = TRANS_STATE_SUPER_COMMITTED;
btrfs_trans_state_lockdep_release(fs_info,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
ret = btrfs_end_transaction(trans);
wait_for_commit(cur_trans, want_state);
return ret;
}
- cur_trans->state = TRANS_STATE_COMMIT_START;
+ cur_trans->state = TRANS_STATE_COMMIT_PREP;
wake_up(&fs_info->transaction_blocked_wait);
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
if (cur_trans->list.prev != &fs_info->trans_list) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
btrfs_put_transaction(prev_trans);
if (ret)
goto lockdep_release;
- } else {
- spin_unlock(&fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
}
} else {
- spin_unlock(&fs_info->trans_lock);
/*
* The previous transaction was aborted and was already removed
* from the list of transactions at fs_info->trans_list. So we
* corrupt state (pointing to trees with unwritten nodes/leafs).
*/
if (BTRFS_FS_ERROR(fs_info)) {
+ spin_unlock(&fs_info->trans_lock);
ret = -EROFS;
goto lockdep_release;
}
}
+ cur_trans->state = TRANS_STATE_COMMIT_START;
+ wake_up(&fs_info->transaction_blocked_wait);
+ spin_unlock(&fs_info->trans_lock);
+
/*
* Get the time spent on the work done by the commit thread and not
* the time spent waiting on a previous commit
goto cleanup_transaction;
lockdep_trans_commit_start_release:
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_end_transaction(trans);
return ret;
}
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
+ TRANS_STATE_COMMIT_PREP,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
struct extent_buffer *leaf;
int slot;
int ins_nr = 0;
- int start_slot;
+ int start_slot = 0;
int ret;
if (!(inode->flags & BTRFS_INODE_PREALLOC))
pgoff_t index,
unsigned long num_ra_pages)
{
- struct page *page;
+ struct folio *folio;
u64 off = (u64)index << PAGE_SHIFT;
loff_t merkle_pos = merkle_file_pos(inode);
int ret;
return ERR_PTR(-EFBIG);
index += merkle_pos >> PAGE_SHIFT;
again:
- page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
- if (page) {
- if (PageUptodate(page))
- return page;
+ folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+ if (!IS_ERR(folio)) {
+ if (folio_test_uptodate(folio))
+ goto out;
- lock_page(page);
- /*
- * We only insert uptodate pages, so !Uptodate has to be
- * an error
- */
- if (!PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
+ folio_lock(folio);
+ /* If it's not uptodate after we have the lock, we got a read error. */
+ if (!folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
return ERR_PTR(-EIO);
}
- unlock_page(page);
- return page;
+ folio_unlock(folio);
+ goto out;
}
- page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
- if (!page)
+ folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
+ 0);
+ if (!folio)
return ERR_PTR(-ENOMEM);
+ ret = filemap_add_folio(inode->i_mapping, folio, index, GFP_NOFS);
+ if (ret) {
+ folio_put(folio);
+ /* Did someone else insert a folio here? */
+ if (ret == -EEXIST)
+ goto again;
+ return ERR_PTR(ret);
+ }
+
/*
* Merkle item keys are indexed from byte 0 in the merkle tree.
* They have the form:
* [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
*/
ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
- page_address(page), PAGE_SIZE, page);
+ folio_address(folio), PAGE_SIZE, &folio->page);
if (ret < 0) {
- put_page(page);
+ folio_put(folio);
return ERR_PTR(ret);
}
if (ret < PAGE_SIZE)
- memzero_page(page, ret, PAGE_SIZE - ret);
+ folio_zero_segment(folio, ret, PAGE_SIZE);
- SetPageUptodate(page);
- ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
- if (!ret) {
- /* Inserted and ready for fsverity */
- unlock_page(page);
- } else {
- put_page(page);
- /* Did someone race us into inserting this page? */
- if (ret == -EEXIST)
- goto again;
- page = ERR_PTR(ret);
- }
- return page;
+out:
+ return folio_file_page(folio, index);
}
/*
u64 search_start;
u64 hole_size;
u64 max_hole_start;
- u64 max_hole_size;
+ u64 max_hole_size = 0;
u64 extent_end;
u64 search_end = device->total_bytes;
int ret;
struct extent_buffer *l;
search_start = dev_extent_search_start(device);
+ max_hole_start = search_start;
WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- max_hole_start = search_start;
- max_hole_size = 0;
-
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
again:
if (search_start >= search_end ||
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
}
EXPORT_SYMBOL(folio_zero_new_buffers);
-static void
+static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
const struct iomap *iomap)
{
* current block, then do not map the buffer and let the caller
* handle it.
*/
- BUG_ON(offset >= iomap->offset + iomap->length);
+ if (offset >= iomap->offset + iomap->length)
+ return -EIO;
switch (iomap->type) {
case IOMAP_HOLE:
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
set_buffer_new(bh);
- break;
+ return 0;
case IOMAP_DELALLOC:
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
set_buffer_uptodate(bh);
set_buffer_mapped(bh);
set_buffer_delay(bh);
- break;
+ return 0;
case IOMAP_UNWRITTEN:
/*
* For unwritten regions, we always need to ensure that regions
fallthrough;
case IOMAP_MAPPED:
if ((iomap->flags & IOMAP_F_NEW) ||
- offset >= i_size_read(inode))
+ offset >= i_size_read(inode)) {
+ /*
+ * This can happen if truncating the block device races
+ * with the check in the caller, as i_size updates on
+ * block devices aren't synchronized by i_rwsem.
+ */
+ if (S_ISBLK(inode->i_mode))
+ return -EIO;
set_buffer_new(bh);
+ }
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
set_buffer_mapped(bh);
- break;
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
}
}
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- if (get_block) {
+ if (get_block)
err = get_block(inode, block, bh, 1);
- if (err)
- break;
- } else {
- iomap_to_bh(inode, block, bh, iomap);
- }
+ else
+ err = iomap_to_bh(inode, block, bh, iomap);
+ if (err)
+ break;
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
if (!dir) {
/* This can happen if we're not mounting cephfs on the root */
dir = ceph_get_inode(parent->i_sb, vino, NULL);
- if (!dir)
- dir = ERR_PTR(-ENOENT);
+ if (IS_ERR(dir))
+ dout("Can't find inode %s (%s)\n", inode_number, name);
}
- if (IS_ERR(dir))
- dout("Can't find inode %s (%s)\n", inode_number, name);
out:
kfree(inode_number);
u64 storage_space, remaining_space, max_variable_size;
efi_status_t status;
- status = efivar_query_variable_info(attr, &storage_space, &remaining_space,
- &max_variable_size);
- if (status != EFI_SUCCESS)
- return efi_status_to_err(status);
+ /* Some UEFI firmware does not implement QueryVariableInfo() */
+ storage_space = remaining_space = 0;
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) {
+ status = efivar_query_variable_info(attr, &storage_space,
+ &remaining_space,
+ &max_variable_size);
+ if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED)
+ pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n",
+ status);
+ }
/*
* This is not a normal filesystem, so no point in pretending it has a block
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
+#include <linux/freezer.h>
#include <trace/events/ext4.h>
/*
return ret;
}
+static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+{
+ if (grp < ext4_get_groups_count(sb))
+ return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+ return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+ ext4_group_first_block_no(sb, grp) - 1) >>
+ EXT4_CLUSTER_BITS(sb);
+}
+
+static bool ext4_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
ext4_grpblk_t max, ext4_grpblk_t minblocks)
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
ext4_grpblk_t next, count, free_count;
+ bool set_trimmed = false;
void *bitmap;
bitmap = e4b->bd_bitmap;
+ if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
+ set_trimmed = true;
start = max(e4b->bd_info->bb_first_free, start);
count = 0;
free_count = 0;
int ret = ext4_trim_extent(sb, start, next - start, e4b);
if (ret && ret != -EOPNOTSUPP)
- break;
+ return count;
count += next - start;
}
free_count += next - start;
start = next + 1;
- if (fatal_signal_pending(current)) {
- count = -ERESTARTSYS;
- break;
- }
+ if (ext4_trim_interrupted())
+ return count;
if (need_resched()) {
ext4_unlock_group(sb, e4b->bd_group);
break;
}
+ if (set_trimmed)
+ EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
+
return count;
}
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
- * @set_trimmed: set the trimmed flag if at least one block is trimmed
*
* ext4_trim_all_free walks through group's block bitmap searching for free
* extents. When the free extent is found, mark it as used in group buddy
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
- ext4_grpblk_t minblocks, bool set_trimmed)
+ ext4_grpblk_t minblocks)
{
struct ext4_buddy e4b;
int ret;
ext4_lock_group(sb, group);
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
- minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
+ minblocks < EXT4_SB(sb)->s_last_trim_minblks)
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
- if (ret >= 0 && set_trimmed)
- EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
- } else {
+ else
ret = 0;
- }
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
ext4_fsblk_t first_data_blk =
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
- bool whole_group, eof = false;
int ret = 0;
start = range->start >> sb->s_blocksize_bits;
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
goto out;
}
- if (end >= max_blks - 1) {
+ if (end >= max_blks - 1)
end = max_blks - 1;
- eof = true;
- }
if (end <= first_data_blk)
goto out;
if (start < first_data_blk)
/* end now represents the last cluster to discard in this group */
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
- whole_group = true;
for (group = first_group; group <= last_group; group++) {
+ if (ext4_trim_interrupted())
+ break;
grp = ext4_get_group_info(sb, group);
if (!grp)
continue;
* change it for the last group, note that last_cluster is
* already computed earlier by ext4_get_group_no_and_offset()
*/
- if (group == last_group) {
+ if (group == last_group)
end = last_cluster;
- whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
- }
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen, whole_group);
+ end, minlen);
if (cnt < 0) {
ret = cnt;
break;
struct buffer_head *bh)
{
struct ext4_dir_entry_tail *t;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
#ifdef PARANOID
struct ext4_dir_entry *d, *top;
d = (struct ext4_dir_entry *)bh->b_data;
top = (struct ext4_dir_entry *)(bh->b_data +
- (EXT4_BLOCK_SIZE(inode->i_sb) -
- sizeof(struct ext4_dir_entry_tail)));
- while (d < top && d->rec_len)
+ (blocksize - sizeof(struct ext4_dir_entry_tail)));
+ while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
d = (struct ext4_dir_entry *)(((void *)d) +
- le16_to_cpu(d->rec_len));
+ ext4_rec_len_from_disk(d->rec_len, blocksize));
if (d != top)
return NULL;
#endif
if (t->det_reserved_zero1 ||
- le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+ (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
+ sizeof(struct ext4_dir_entry_tail)) ||
t->det_reserved_zero2 ||
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
return NULL;
struct ext4_dir_entry *dp;
struct dx_root_info *root;
int count_offset;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+ unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
- if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+ if (rlen == blocksize)
count_offset = 8;
- else if (le16_to_cpu(dirent->rec_len) == 12) {
+ else if (rlen == 12) {
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
- if (le16_to_cpu(dp->rec_len) !=
- EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+ if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
return NULL;
root = (struct dx_root_info *)(((void *)dp + 12));
if (root->reserved_zero ||
unsigned int buflen = bh->b_size;
char *base = bh->b_data;
struct dx_hash_info h = *hinfo;
+ int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
if (ext4_has_metadata_csum(dir->i_sb))
buflen -= sizeof(struct ext4_dir_entry_tail);
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
- map_tail->size = le16_to_cpu(de->rec_len);
+ map_tail->size = ext4_rec_len_from_disk(de->rec_len,
+ blocksize);
count++;
cond_resched();
}
- de = ext4_next_entry(de, dir->i_sb->s_blocksize);
+ de = ext4_next_entry(de, blocksize);
}
return count;
}
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb = ext4_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("ext4");
if (wbc->pages_skipped) {
/*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
+ * Writeback is not making progress due to locked buffers.
+ * Skip this inode for now. Although having skipped pages
+ * is odd for clean inodes, it can happen for some
+ * filesystems so handle that gracefully.
*/
- redirty_tail_locked(inode, wb);
+ if (inode->i_state & I_DIRTY_ALL)
+ redirty_tail_locked(inode, wb);
+ else
+ inode_cgwb_move_to_attached(inode, wb);
return;
}
if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
if (!spin_trylock(&gl->gl_lockref.lock))
continue;
- if (!gl->gl_lockref.count) {
+ if (gl->gl_lockref.count <= 1 &&
+ (gl->gl_state == LM_ST_UNLOCKED ||
+ demote_ok(gl))) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
freed++;
struct super_block *sb = sdp->sd_vfs;
if (!remote ||
- gl->gl_state != LM_ST_SHARED ||
+ (gl->gl_state != LM_ST_SHARED &&
+ gl->gl_state != LM_ST_UNLOCKED) ||
gl->gl_demote_state != LM_ST_UNLOCKED)
return;
/*
* Try to get an active super block reference to prevent racing with
- * unmount (see trylock_super()). But note that unmount isn't the only
- * place where a write lock on s_umount is taken, and we can fail here
- * because of things like remount as well.
+ * unmount (see super_trylock_shared()). But note that unmount isn't
+ * the only place where a write lock on s_umount is taken, and we can
+ * fail here because of things like remount as well.
*/
if (down_read_trylock(&sb->s_umount)) {
atomic_inc(&sb->s_active);
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
return ret;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
+ sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
return 0;
ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);
if (ret)
}
EXPORT_SYMBOL(file_remove_privs);
-/**
- * current_mgtime - Return FS time (possibly fine-grained)
- * @inode: inode.
- *
- * Return the current time truncated to the time granularity supported by
- * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
- * as having been QUERIED, get a fine-grained timestamp.
- */
-struct timespec64 current_mgtime(struct inode *inode)
-{
- struct timespec64 now, ctime;
- atomic_long_t *pnsec = (atomic_long_t *)&inode->__i_ctime.tv_nsec;
- long nsec = atomic_long_read(pnsec);
-
- if (nsec & I_CTIME_QUERIED) {
- ktime_get_real_ts64(&now);
- return timestamp_truncate(now, inode);
- }
-
- ktime_get_coarse_real_ts64(&now);
- now = timestamp_truncate(now, inode);
-
- /*
- * If we've recently fetched a fine-grained timestamp
- * then the coarse-grained one may still be earlier than the
- * existing ctime. Just keep the existing value if so.
- */
- ctime = inode_get_ctime(inode);
- if (timespec64_compare(&ctime, &now) > 0)
- now = ctime;
-
- return now;
-}
-EXPORT_SYMBOL(current_mgtime);
-
-static struct timespec64 current_ctime(struct inode *inode)
-{
- if (is_mgtime(inode))
- return current_mgtime(inode);
- return current_time(inode);
-}
-
static int inode_needs_update_time(struct inode *inode)
{
int sync_it = 0;
- struct timespec64 now = current_ctime(inode);
+ struct timespec64 now = current_time(inode);
struct timespec64 ctime;
/* First try to exhaust all avenues to not sync */
*/
struct timespec64 inode_set_ctime_current(struct inode *inode)
{
- struct timespec64 now;
- struct timespec64 ctime;
-
- ctime.tv_nsec = READ_ONCE(inode->__i_ctime.tv_nsec);
- if (!(ctime.tv_nsec & I_CTIME_QUERIED)) {
- now = current_time(inode);
+ struct timespec64 now = current_time(inode);
- /* Just copy it into place if it's not multigrain */
- if (!is_mgtime(inode)) {
- inode_set_ctime_to_ts(inode, now);
- return now;
- }
-
- /*
- * If we've recently updated with a fine-grained timestamp,
- * then the coarse-grained one may still be earlier than the
- * existing ctime. Just keep the existing value if so.
- */
- ctime.tv_sec = inode->__i_ctime.tv_sec;
- if (timespec64_compare(&ctime, &now) > 0)
- return ctime;
-
- /*
- * Ctime updates are usually protected by the inode_lock, but
- * we can still race with someone setting the QUERIED flag.
- * Try to swap the new nsec value into place. If it's changed
- * in the interim, then just go with a fine-grained timestamp.
- */
- if (cmpxchg(&inode->__i_ctime.tv_nsec, ctime.tv_nsec,
- now.tv_nsec) != ctime.tv_nsec)
- goto fine_grained;
- inode->__i_ctime.tv_sec = now.tv_sec;
- return now;
- }
-fine_grained:
- ktime_get_real_ts64(&now);
- inode_set_ctime_to_ts(inode, timestamp_truncate(now, inode));
+ inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
return now;
}
EXPORT_SYMBOL(inode_set_ctime_current);
size_t poff, plen;
/*
- * If the write completely overlaps the current folio, then
+ * If the write or zeroing completely overlaps the current folio, then
* entire folio will be dirtied so there is no need for
* per-block state tracking structures to be attached to this folio.
+ * For the unshare case, we must read in the ondisk contents because we
+ * are not changing pagecache contents.
*/
- if (pos <= folio_pos(folio) &&
+ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
pos + len >= folio_pos(folio) + folio_size(folio))
return 0;
/*
* Scan the data range passed to us for dirty page cache folios. If we find a
- * dirty folio, punch out the preceeding range and update the offset from which
+ * dirty folio, punch out the preceding range and update the offset from which
* the next punch will start from.
*
* We can punch out storage reservations under clean pages because they either
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
- long status = 0;
loff_t written = 0;
/* don't bother with blocks that are not shared to start with */
return length;
do {
- unsigned long offset = offset_in_page(pos);
- unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
struct folio *folio;
+ int status;
+ size_t offset;
+ size_t bytes = min_t(u64, SIZE_MAX, length);
status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
return status;
- if (iter->iomap.flags & IOMAP_F_STALE)
+ if (iomap->flags & IOMAP_F_STALE)
break;
- status = iomap_write_end(iter, pos, bytes, bytes, folio);
- if (WARN_ON_ONCE(status == 0))
+ offset = offset_in_folio(folio, pos);
+ if (bytes > folio_size(folio) - offset)
+ bytes = folio_size(folio) - offset;
+
+ bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ if (WARN_ON_ONCE(bytes == 0))
return -EIO;
cond_resched();
- pos += status;
- written += status;
- length -= status;
+ pos += bytes;
+ written += bytes;
+ length -= bytes;
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (length);
+ } while (length > 0);
return written;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
- struct page *page = bh->b_page;
char *addr;
__u32 checksum;
- addr = kmap_atomic(page);
- checksum = crc32_be(crc32_sum,
- (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
- kunmap_atomic(addr);
+ addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
+ checksum = crc32_be(crc32_sum, addr, bh->b_size);
+ kunmap_local(addr);
return checksum;
}
struct buffer_head *bh, __u32 sequence)
{
journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
- struct page *page = bh->b_page;
__u8 *addr;
__u32 csum32;
__be32 seq;
return;
seq = cpu_to_be32(sequence);
- addr = kmap_atomic(page);
+ addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
- csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
- bh->b_size);
- kunmap_atomic(addr);
+ csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
+ kunmap_local(addr);
if (jbd2_has_feature_csum3(j))
tag3->t_checksum = cpu_to_be32(csum32);
err_cleanup:
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+ if (journal->j_chksum_driver)
+ crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
journal_fail_superblock(journal);
/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
- struct page *page;
- int offset;
char *source;
struct buffer_head *bh = jh2bh(jh);
J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
- page = bh->b_page;
- offset = offset_in_page(bh->b_data);
- source = kmap_atomic(page);
+ source = kmap_local_folio(bh->b_folio, bh_offset(bh));
/* Fire data frozen trigger just before we copy the data */
- jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
- memcpy(jh->b_frozen_data, source + offset, bh->b_size);
- kunmap_atomic(source);
+ jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers);
+ memcpy(jh->b_frozen_data, source, bh->b_size);
+ kunmap_local(source);
/*
* Now that the frozen data is saved off, we need to store any matching
* We don't know how much we wrote, so just return the number of
* bytes which were direct-written
*/
+ iocb->ki_pos -= buffered_written;
if (direct_written)
return direct_written;
return err;
xas_for_each(&xas, folio, last_page) {
loff_t pg_end;
bool pg_failed = false;
+ bool folio_started;
if (xas_retry(&xas, folio))
continue;
pg_end = folio_pos(folio) + folio_size(folio) - 1;
+ folio_started = false;
for (;;) {
loff_t sreq_end;
pg_failed = true;
break;
}
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+ if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
folio_start_fscache(folio);
+ folio_started = true;
+ }
pg_failed |= subreq_failed;
sreq_end = subreq->start + subreq->len - 1;
if (pg_end < sreq_end)
dreq->max_count = dreq_len;
if (dreq->count > dreq_len)
dreq->count = dreq_len;
-
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
- dreq->error = hdr->error;
- else /* Clear outstanding error if this is EOF */
- dreq->error = 0;
}
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
+ dreq->error = hdr->error;
}
static void
dreq->count = dreq_len;
}
+static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+ struct nfs_page *req)
+{
+ loff_t offs = req_offset(req);
+ size_t req_start = (size_t)(offs - dreq->io_start);
+
+ if (req_start < dreq->max_count)
+ dreq->max_count = req_start;
+ if (req_start < dreq->count)
+ dreq->count = req_start;
+}
+
/**
* nfs_swap_rw - NFS address space operation for swap I/O
* @iocb: target I/O control block
kref_get(&head->wb_kref);
}
-static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+static void nfs_direct_join_group(struct list_head *list,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode)
{
struct nfs_page *req, *subreq;
nfs_release_request(subreq);
}
} while ((subreq = subreq->wb_this_page) != req);
- nfs_join_page_group(req, inode);
+ nfs_join_page_group(req, cinfo, inode);
}
}
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor desc;
- struct nfs_page *req, *tmp;
+ struct nfs_page *req;
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
- LIST_HEAD(failed);
nfs_init_cinfo_from_dreq(&cinfo, dreq);
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
- nfs_direct_join_group(&reqs, dreq->inode);
+ nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
- dreq->count = 0;
- dreq->max_count = 0;
- list_for_each_entry(req, &reqs, wb_list)
- dreq->max_count += req->wb_bytes;
nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
get_dreq(dreq);
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
- list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+ while (!list_empty(&reqs)) {
+ req = nfs_list_entry(reqs.next);
/* Bump the transmission count */
req->wb_nio++;
if (!nfs_pageio_add_request(&desc, req)) {
- nfs_list_move_request(req, &failed);
- spin_lock(&cinfo.inode->i_lock);
- dreq->flags = 0;
- if (desc.pg_error < 0)
+ spin_lock(&dreq->lock);
+ if (dreq->error < 0) {
+ desc.pg_error = dreq->error;
+ } else if (desc.pg_error != -EAGAIN) {
+ dreq->flags = 0;
+ if (!desc.pg_error)
+ desc.pg_error = -EIO;
dreq->error = desc.pg_error;
- else
- dreq->error = -EIO;
- spin_unlock(&cinfo.inode->i_lock);
+ } else
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ break;
}
nfs_release_request(req);
}
nfs_pageio_complete(&desc);
- while (!list_empty(&failed)) {
- req = nfs_list_entry(failed.next);
+ while (!list_empty(&reqs)) {
+ req = nfs_list_entry(reqs.next);
nfs_list_remove_request(req);
nfs_unlock_and_release_request(req);
+ if (desc.pg_error == -EAGAIN) {
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ } else {
+ spin_lock(&dreq->lock);
+ nfs_direct_truncate_request(dreq, req);
+ spin_unlock(&dreq->lock);
+ nfs_release_request(req);
+ }
}
if (put_dreq(dreq))
if (status < 0) {
/* Errors in commit are fatal */
dreq->error = status;
- dreq->max_count = 0;
- dreq->count = 0;
dreq->flags = NFS_ODIRECT_DONE;
} else {
status = dreq->error;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- if (status >= 0 && !nfs_write_match_verf(verf, req)) {
+ if (status < 0) {
+ spin_lock(&dreq->lock);
+ nfs_direct_truncate_request(dreq, req);
+ spin_unlock(&dreq->lock);
+ nfs_release_request(req);
+ } else if (!nfs_write_match_verf(verf, req)) {
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
/*
* Despite the reboot, the write was successful,
*/
req->wb_nio = 0;
nfs_mark_request_commit(req, NULL, &cinfo, 0);
- } else /* Error or match */
+ } else
nfs_release_request(req);
nfs_unlock_and_release_request(req);
}
while (!list_empty(&reqs)) {
req = nfs_list_entry(reqs.next);
nfs_list_remove_request(req);
+ nfs_direct_truncate_request(dreq, req);
nfs_release_request(req);
nfs_unlock_and_release_request(req);
}
}
nfs_direct_count_bytes(dreq, hdr);
- if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
+ if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
+ !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (!dreq->flags)
dreq->flags = NFS_ODIRECT_DO_COMMIT;
flags = dreq->flags;
static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
+ struct nfs_page *req;
+ struct nfs_commit_info cinfo;
trace_nfs_direct_write_reschedule_io(dreq);
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
- if (dreq->error == 0) {
+ if (dreq->error == 0)
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
- /* fake unstable write to let common nfs resend pages */
- hdr->verf.committed = NFS_UNSTABLE;
- hdr->good_bytes = hdr->args.offset + hdr->args.count -
- hdr->io_start;
- }
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
spin_unlock(&dreq->lock);
+ while (!list_empty(&hdr->pages)) {
+ req = nfs_list_entry(hdr->pages.next);
+ nfs_list_remove_request(req);
+ nfs_unlock_request(req);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ }
}
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
{
struct nfs_pageio_descriptor desc;
struct inode *inode = dreq->inode;
+ struct nfs_commit_info cinfo;
ssize_t result = 0;
size_t requested_bytes = 0;
size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+ bool defer = false;
trace_nfs_direct_write_schedule_iovec(dreq);
break;
}
- nfs_lock_request(req);
- if (!nfs_pageio_add_request(&desc, req)) {
- result = desc.pg_error;
- nfs_unlock_and_release_request(req);
- break;
- }
pgbase = 0;
bytes -= req_len;
requested_bytes += req_len;
pos += req_len;
dreq->bytes_left -= req_len;
+
+ if (defer) {
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ continue;
+ }
+
+ nfs_lock_request(req);
+ if (nfs_pageio_add_request(&desc, req))
+ continue;
+
+ /* Exit on hard errors */
+ if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
+ result = desc.pg_error;
+ nfs_unlock_and_release_request(req);
+ break;
+ }
+
+ /* If the error is soft, defer remaining requests */
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ spin_lock(&dreq->lock);
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ nfs_unlock_request(req);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ desc.pg_error = 0;
+ defer = true;
}
nfs_direct_release_pages(pagevec, npages);
kvfree(pagevec);
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
case -EOPNOTSUPP:
+ case -EINVAL:
case -ECONNREFUSED:
case -ECONNRESET:
case -EHOSTDOWN:
.net = old->cl_net,
.servername = old->cl_hostname,
};
+ int max_connect = test_bit(NFS_CS_PNFS, &clp->cl_flags) ?
+ clp->cl_max_connect : old->cl_max_connect;
if (clp->cl_proto != old->cl_proto)
return;
xprt_args.addrlen = clp_salen;
rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
- rpc_clnt_test_and_add_xprt, NULL);
+ rpc_clnt_test_and_add_xprt, &max_connect);
}
/**
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
+ __set_bit(NFS_CS_PNFS, &cl_init.init_flags);
+ cl_init.max_connect = NFS_MAX_TRANSPORTS;
/*
* Set an authflavor equal to the MDS value. Use the MDS nfs_client
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
+ struct nfs_fh *fh = &o_res->fh;
+
nfs4_sequence_free_slot(&o_res->seq_res);
- nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, NULL);
+ if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
+ fh = NFS_FH(d_inode(data->dentry));
+ nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
}
return 0;
}
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
-static void nfs_clear_request_commit(struct nfs_page *req);
+static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+ struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode);
static struct nfs_page *
* the (former) group. All subrequests are removed from any write or commit
* lists, unlinked from the group and destroyed.
*/
-void
-nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
+ struct inode *inode)
{
struct nfs_page *subreq;
struct nfs_page *destroy_list = NULL;
* Commit list removal accounting is done after locks are dropped */
subreq = head;
do {
- nfs_clear_request_commit(subreq);
+ nfs_clear_request_commit(cinfo, subreq);
subreq = subreq->wb_this_page;
} while (subreq != head);
{
struct inode *inode = folio_file_mapping(folio)->host;
struct nfs_page *head;
+ struct nfs_commit_info cinfo;
int ret;
+ nfs_init_cinfo_from_inode(&cinfo, inode);
/*
* A reference is taken only on the head request which acts as a
* reference to the whole page group - the group will not be destroyed
return ERR_PTR(ret);
}
- nfs_join_page_group(head, inode);
+ nfs_join_page_group(head, &cinfo, inode);
return head;
}
}
/* Called holding the request lock on @req */
-static void
-nfs_clear_request_commit(struct nfs_page *req)
+static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+ struct nfs_page *req)
{
if (test_bit(PG_CLEAN, &req->wb_flags)) {
struct nfs_open_context *ctx = nfs_req_openctx(req);
struct inode *inode = d_inode(ctx->dentry);
- struct nfs_commit_info cinfo;
- nfs_init_cinfo_from_inode(&cinfo, inode);
mutex_lock(&NFS_I(inode)->commit_mutex);
- if (!pnfs_clear_request_commit(req, &cinfo)) {
- nfs_request_remove_commit_list(req, &cinfo);
+ if (!pnfs_clear_request_commit(req, cinfo)) {
+ nfs_request_remove_commit_list(req, cinfo);
}
mutex_unlock(&NFS_I(inode)->commit_mutex);
nfs_folio_clear_commit(nfs_page_to_folio(req));
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
- set_change_info(&rename->rn_sinfo, &cstate->current_fh);
- set_change_info(&rename->rn_tinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_sinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_tinfo, &cstate->current_fh);
return nfs_ok;
}
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
+ unsigned int base = xdr->buf->page_len & ~PAGE_MASK;
unsigned int starting_len = xdr->buf->len;
__be32 zero = xdr_zero;
__be32 nfserr;
return nfserr_resource;
nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file,
- read->rd_offset, &maxcount,
- xdr->buf->page_len & ~PAGE_MASK,
+ read->rd_offset, &maxcount, base,
&read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
+ struct seq_file *seq = file->private_data;
+ struct svc_serv *serv = seq->private;
int ret = seq_release(inode, file);
- struct net *net = inode->i_sb->s_fs_info;
mutex_lock(&nfsd_mutex);
- nfsd_put(net);
+ svc_put(serv);
mutex_unlock(&nfsd_mutex);
return ret;
}
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
- if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
- brelse(bh);
+ if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
goto failed;
- }
}
lock_buffer(bh);
failed:
unlock_page(bh->b_page);
put_page(bh->b_page);
+ if (unlikely(err))
+ brelse(bh);
return err;
}
put_inode_out:
iput(inode);
out:
+ ntfs3_put_sbi(sbi);
kfree(boot2);
return err;
}
{
struct iattr attr = {
.ia_valid =
- ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
+ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
.ia_atime = stat->atime,
.ia_mtime = stat->mtime,
};
if (err)
return err;
- if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
+ if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
+ (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
/*
* Copy the fileattr inode flags that are the source of already
* copied i_flags
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
- struct fd fd;
};
static struct kmem_cache *ovl_aio_request_cachep;
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
{
if (refcount_dec_and_test(&aio_req->ref)) {
- fdput(aio_req->fd);
+ fput(aio_req->iocb.ki_filp);
kmem_cache_free(ovl_aio_request_cachep, aio_req);
}
}
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
+ /*
+ * Overlayfs doesn't support deferred completions, don't copy
+ * this property in case it is set by the issuer.
+ */
+ ifl &= ~IOCB_DIO_CALLER_COMP;
+
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
file_start_write(real.file);
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
break;
}
ret += copied;
- buf->offset = 0;
buf->len = copied;
if (!iov_iter_count(from))
struct inode *inode;
struct task_struct *task;
struct mm_struct *mm;
-#ifdef CONFIG_MMU
struct vma_iterator iter;
-#endif
#ifdef CONFIG_NUMA
struct mempolicy *task_mempolicy;
#endif
return nommu_vma_show(m, _p);
}
-static void *m_start(struct seq_file *m, loff_t *pos)
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+ loff_t *ppos)
+{
+ struct vm_area_struct *vma = vma_next(&priv->iter);
+
+ if (vma) {
+ *ppos = vma->vm_start;
+ } else {
+ *ppos = -1UL;
+ }
+
+ return vma;
+}
+
+static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
+ unsigned long last_addr = *ppos;
struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long addr = *pos;
- /* See m_next(). Zero at the start or after lseek. */
- if (addr == -1UL)
+ /* See proc_get_vma(). Zero at the start or after lseek. */
+ if (last_addr == -1UL)
return NULL;
/* pin the task and mm whilst we play with them */
return ERR_PTR(-ESRCH);
mm = priv->mm;
- if (!mm || !mmget_not_zero(mm))
+ if (!mm || !mmget_not_zero(mm)) {
+ put_task_struct(priv->task);
+ priv->task = NULL;
return NULL;
+ }
if (mmap_read_lock_killable(mm)) {
mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
return ERR_PTR(-EINTR);
}
- /* start the next element from addr */
- vma = find_vma(mm, addr);
- if (vma)
- return vma;
+ vma_iter_init(&priv->iter, mm, last_addr);
- mmap_read_unlock(mm);
- mmput(mm);
- return NULL;
+ return proc_get_vma(priv, ppos);
}
-static void m_stop(struct seq_file *m, void *_vml)
+static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
+ struct mm_struct *mm = priv->mm;
- if (!IS_ERR_OR_NULL(_vml)) {
- mmap_read_unlock(priv->mm);
- mmput(priv->mm);
- }
- if (priv->task) {
- put_task_struct(priv->task);
- priv->task = NULL;
- }
+ if (!priv->task)
+ return;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
}
-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
{
- struct vm_area_struct *vma = _p;
-
- *pos = vma->vm_end;
- return find_vma(vma->vm_mm, vma->vm_end);
+ return proc_get_vma(m->private, ppos);
}
static const struct seq_operations proc_pid_maps_ops = {
#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
#define journal_trans_half(blocksize) \
- ((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32))
+ ((blocksize - sizeof(struct reiserfs_journal_desc) - 12) / sizeof(__u32))
/* journal.c see journal.c for all the comments here */
__le32 j_len;
__le32 j_mount_id; /* mount id of this trans */
- __le32 j_realblock[1]; /* real locations for each block */
+ __le32 j_realblock[]; /* real locations for each block */
};
#define get_desc_trans_id(d) le32_to_cpu((d)->j_trans_id)
struct reiserfs_journal_commit {
__le32 j_trans_id; /* must match j_trans_id from the desc block */
__le32 j_len; /* ditto */
- __le32 j_realblock[1]; /* real locations for each block */
+ __le32 j_realblock[]; /* real locations for each block */
};
#define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id)
struct cached_fid *cfid, *q;
LIST_HEAD(entry);
+ if (cfids == NULL)
+ return;
+
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
list_move(&cfid->entry, &entry);
struct cached_fid *cfid, *q;
LIST_HEAD(entry);
+ if (cfids == NULL)
+ return;
+
if (cfids->laundromat) {
kthread_stop(cfids->laundromat);
cfids->laundromat = NULL;
#define MID_RETRY_NEEDED 8 /* session closed while this request out */
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
+#define MID_RESPONSE_READY 0x40 /* ready for another process to handle the rsp */
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
* ->can_cache_brlcks
* cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
- * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc
+ * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc
* cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
extern struct cifs_ses *sesInfoAlloc(void);
extern void sesInfoFree(struct cifs_ses *);
-extern struct cifs_tcon *tconInfoAlloc(void);
+extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
}
}
- tcon = tconInfoAlloc();
+ /* no need to setup directory caching on IPC share, so pass in false */
+ tcon = tcon_info_alloc(false);
if (tcon == NULL)
return -ENOMEM;
goto out_fail;
}
- tcon = tconInfoAlloc();
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+ tcon = tcon_info_alloc(true);
+ else
+ tcon = tcon_info_alloc(false);
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
cifs_parse_mount_err:
kfree_sensitive(ctx->password);
+ ctx->password = NULL;
return -EINVAL;
}
}
cifsFileInfo_put(cfile);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int cifs_truncate_page(struct address_space *mapping, loff_t from)
}
struct cifs_tcon *
-tconInfoAlloc(void)
+tcon_info_alloc(bool dir_leases_enabled)
{
struct cifs_tcon *ret_buf;
ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
if (!ret_buf)
return NULL;
- ret_buf->cfids = init_cached_dirs();
- if (!ret_buf->cfids) {
- kfree(ret_buf);
- return NULL;
+
+ if (dir_leases_enabled == true) {
+ ret_buf->cfids = init_cached_dirs();
+ if (!ret_buf->cfids) {
+ kfree(ret_buf);
+ return NULL;
+ }
}
+ /* else ret_buf->cfids remains NULL from the kzalloc() above */
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
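Tying the cifs hunks together (my reading of the diff, not wording from the patch): tcon_info_alloc(true) is only used when the server advertises SMB2_GLOBAL_CAP_DIRECTORY_LEASING, while the IPC share and the dummy reconnect tcon pass false and therefore keep tcon->cfids NULL, which is why the cached-dirs teardown paths above gained explicit early returns when cfids is NULL.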
int rc = 0;
switch (rsp->hdr.Status) {
+ case STATUS_IO_REPARSE_TAG_NOT_HANDLED:
+ reparse_point = true;
+ break;
case STATUS_STOPPED_ON_SYMLINK:
rc = smb2_parse_symlink_response(cifs_sb, iov,
&data->symlink_target);
"STATUS_IO_REPARSE_TAG_MISMATCH"},
{STATUS_IO_REPARSE_DATA_INVALID, -EIO,
"STATUS_IO_REPARSE_DATA_INVALID"},
- {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EIO,
- "STATUS_IO_REPARSE_TAG_NOT_HANDLED"},
{STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO,
"STATUS_REPARSE_POINT_NOT_RESOLVED"},
{STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO,
cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
credits->value, new_val);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
spin_lock(&server->req_lock);
/* Use a fudge factor of 256 bytes in case we collide
* with a different set_EAs command.
*/
- if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
used_len + ea_name_len + ea_value_len + 1) {
rc = -ENOSPC;
if (shdr->Command != SMB2_READ) {
cifs_server_dbg(VFS, "only big read responses are supported\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (server->ops->is_session_expired &&
struct TCP_Server_Info *server)
{
struct smb3_hdr_req *smb3_hdr;
+
shdr->ProtocolId = SMB2_PROTO_NUMBER;
shdr->StructureSize = cpu_to_le16(64);
shdr->Command = smb2_cmd;
- if (server->dialect >= SMB30_PROT_ID) {
- /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
- smb3_hdr = (struct smb3_hdr_req *)shdr;
- /* if primary channel is not set yet, use default channel for chan sequence num */
- if (SERVER_IS_CHAN(server))
- smb3_hdr->ChannelSequence =
- cpu_to_le16(server->primary_server->channel_sequence_num);
- else
- smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num);
- }
+
if (server) {
+ /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
+ if (server->dialect >= SMB30_PROT_ID) {
+ smb3_hdr = (struct smb3_hdr_req *)shdr;
+ /*
+ * if primary channel is not set yet, use default
+ * channel for chan sequence num
+ */
+ if (SERVER_IS_CHAN(server))
+ smb3_hdr->ChannelSequence =
+ cpu_to_le16(server->primary_server->channel_sequence_num);
+ else
+ smb3_hdr->ChannelSequence =
+ cpu_to_le16(server->channel_sequence_num);
+ }
spin_lock(&server->req_lock);
/* Request up to 10 credits but don't go over the limit. */
if (server->credits >= server->max_credits)
iov[num].iov_base = create_posix_buf(mode);
if (mode == ACL_NO_MODE)
- cifs_dbg(FYI, "Invalid mode\n");
+ cifs_dbg(FYI, "%s: no mode\n", __func__);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
* (most servers default to 120 seconds) and most clients default to 0.
* This can be overridden at mount ("handletimeout=") if the user wants
* a different persistent (or resilient) handle timeout for all opens
- * opens on a particular SMB3 mount.
+ * on a particular SMB3 mount.
*/
buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
return 0;
}
-/* See See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
+/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
static void setup_owner_group_sids(char *buf)
{
struct owner_group_sids *sids = (struct owner_group_sids *)buf;
SMB2_ioctl_free(struct smb_rqst *rqst)
{
int i;
+
if (rqst && rqst->rq_iov) {
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
for (i = 1; i < rqst->rq_nvec; i++)
goto done;
/* allocate a dummy tcon struct used for reconnect */
- tcon = tconInfoAlloc();
+ tcon = tcon_info_alloc(false);
if (!tcon) {
resched = true;
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
server->smbd_conn = smbd_get_connection(
server, (struct sockaddr *) &server->dstaddr);
- if (server->smbd_conn)
+ if (server->smbd_conn) {
cifs_dbg(VFS, "RDMA transport re-established\n");
-
- return server->smbd_conn ? 0 : -ENOENT;
+ trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr);
+ return 0;
+ }
+ trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr);
+ return -ENOENT;
}
static void destroy_caches_and_workqueue(struct smbd_connection *info)
TP_ARGS(hostname, conn_id, addr))
DEFINE_SMB3_CONNECT_EVENT(connect_done);
+DEFINE_SMB3_CONNECT_EVENT(smbd_connect_done);
+DEFINE_SMB3_CONNECT_EVENT(smbd_connect_err);
DECLARE_EVENT_CLASS(smb3_connect_err_class,
TP_PROTO(char *hostname, __u64 conn_id,
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
-#include <asm/processor.h>
+#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
+ if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ mid->mid_state = MID_RESPONSE_READY;
wake_up_process(mid->callback_data);
}
struct TCP_Server_Info *server = midEntry->server;
if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
- midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+ (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
+ midEntry->mid_state == MID_RESPONSE_READY) &&
server->ops->handle_cancelled_mid)
server->ops->handle_cancelled_mid(midEntry, server);
int error;
error = wait_event_state(server->response_q,
- midQ->mid_state != MID_REQUEST_SUBMITTED,
+ midQ->mid_state != MID_REQUEST_SUBMITTED &&
+ midQ->mid_state != MID_RESPONSE_RECEIVED,
(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
if (error < 0)
return -ERESTARTSYS;
spin_lock(&server->mid_lock);
switch (mid->mid_state) {
- case MID_RESPONSE_RECEIVED:
+ case MID_RESPONSE_READY:
spin_unlock(&server->mid_lock);
return rc;
case MID_RETRY_NEEDED:
credits.instance = server->reconnect_instance;
add_credits(server, &credits, mid->optype);
+
+ if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ mid->mid_state = MID_RESPONSE_READY;
}
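A rough sketch of the mid state flow these transport hunks implement; the ordering below is my reading of the diff, not text from the patch:

	/*
	 * MID_REQUEST_SUBMITTED   request sent, no response yet
	 * MID_RESPONSE_RECEIVED   demultiplex/callback path holds the response,
	 *                         credit accounting still in progress
	 * MID_RESPONSE_READY      callback done; the waiting task may consume
	 *                         midQ->resp_buf
	 */

Synchronous waiters accordingly sleep until the state is neither SUBMITTED nor RESPONSE_RECEIVED, and only treat RESPONSE_READY as a completed reply.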
static void
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
- if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
}
if (!midQ[i]->resp_buf ||
- midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
+ midQ[i]->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_dbg(FYI, "Bad MID state?\n");
goto out;
if (rc != 0) {
send_cancel(server, &rqst, midQ);
spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
spin_unlock(&server->mid_lock);
}
if (!midQ->resp_buf || !out_buf ||
- midQ->mid_state != MID_RESPONSE_RECEIVED) {
+ midQ->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_server_dbg(VFS, "Bad MID state?\n");
goto out;
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(server->response_q,
- (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
+ (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
((server->tcpStatus != CifsGood) &&
(server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) &&
- (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
+ (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) &&
((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) {
spin_unlock(&server->srv_lock);
if (rc) {
send_cancel(server, &rqst, midQ);
spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
spin_unlock(&server->mid_lock);
return rc;
/* rcvd frame is ok */
- if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
+ if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_tcon_dbg(VFS, "Bad MID state?\n");
goto out;
if (work->send_no_response)
return 0;
+ if (!work->iov_idx)
+ return -EINVAL;
+
ksmbd_conn_lock(conn);
sent = conn->transport->ops->writev(conn->transport, work->iov,
work->iov_cnt,
if (check_conn_state(work))
return SERVER_HANDLER_CONTINUE;
- if (ksmbd_verify_smb_message(work))
+ if (ksmbd_verify_smb_message(work)) {
+ conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
return SERVER_HANDLER_ABORT;
+ }
command = conn->ops->get_cmd_val(work);
*cmd = command;
validate_credit:
if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
- smb2_validate_credit_charge(work->conn, hdr)) {
- work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+ smb2_validate_credit_charge(work->conn, hdr))
return 1;
- }
return 0;
}
aux_payload_buf,
nbytes);
kvfree(aux_payload_buf);
-
+ aux_payload_buf = NULL;
nbytes = 0;
if (remain_bytes < 0) {
err = (int)remain_bytes;
out:
posix_acl_release(fattr.cf_acls);
posix_acl_release(fattr.cf_dacls);
- mark_inode_dirty(inode);
return rc;
}
#include "mount.h"
/**
- * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED
- * @stat: where to store the resulting values
- * @request_mask: STATX_* values requested
- * @inode: inode from which to grab the c/mtime
- *
- * Given @inode, grab the ctime and mtime out if it and store the result
- * in @stat. When fetching the value, flag it as queried so the next write
- * will use a fine-grained timestamp.
- */
-void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode)
-{
- atomic_long_t *pnsec = (atomic_long_t *)&inode->__i_ctime.tv_nsec;
-
- /* If neither time was requested, then don't report them */
- if (!(request_mask & (STATX_CTIME|STATX_MTIME))) {
- stat->result_mask &= ~(STATX_CTIME|STATX_MTIME);
- return;
- }
-
- stat->mtime = inode->i_mtime;
- stat->ctime.tv_sec = inode->__i_ctime.tv_sec;
- /*
- * Atomically set the QUERIED flag and fetch the new value with
- * the flag masked off.
- */
- stat->ctime.tv_nsec = atomic_long_fetch_or(I_CTIME_QUERIED, pnsec) &
- ~I_CTIME_QUERIED;
-}
-EXPORT_SYMBOL(fill_mg_cmtime);
-
-/**
* generic_fillattr - Fill in the basic attributes from the inode struct
* @idmap: idmap of the mount the inode was found from
* @request_mask: statx request_mask
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
-
- if (is_mgtime(inode)) {
- fill_mg_cmtime(stat, request_mask, inode);
- } else {
- stat->mtime = inode->i_mtime;
- stat->ctime = inode_get_ctime(inode);
- }
-
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode_get_ctime(inode);
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
#ifdef __ARCH_WANT_NEW_STAT
-#if BITS_PER_LONG == 32
-# define choose_32_64(a,b) a
-#else
-# define choose_32_64(a,b) b
-#endif
-
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
struct dentry *dentry,
unsigned int flags);
static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
+static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
static int eventfs_release(struct inode *inode, struct file *file);
static const struct inode_operations eventfs_root_dir_inode_operations = {
static const struct file_operations eventfs_file_operations = {
.open = dcache_dir_open_wrapper,
.read = generic_read_dir,
- .iterate_shared = dcache_readdir,
+ .iterate_shared = dcache_readdir_wrapper,
.llseek = generic_file_llseek,
.release = eventfs_release,
};
/**
* eventfs_set_ef_status_free - set the ef->status to free
+ * @ti: the tracefs_inode of the dentry
 * @dentry: dentry whose status is to be freed
*
* eventfs_set_ef_status_free will be called if no more
* references remain
*/
-void eventfs_set_ef_status_free(struct dentry *dentry)
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
{
struct tracefs_inode *ti_parent;
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef, *tmp;
+
+ /* The top level events directory may be freed by this */
+ if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
+ LIST_HEAD(ef_del_list);
+
+ mutex_lock(&eventfs_mutex);
+
+ ei = ti->private;
+
+ /* Record all the top level files */
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ lockdep_is_held(&eventfs_mutex)) {
+ list_add_tail(&ef->del_list, &ef_del_list);
+ }
+
+ /* Nothing should access this, but just in case! */
+ ti->private = NULL;
+
+ mutex_unlock(&eventfs_mutex);
+
+ /* Now safely free the top level files and their children */
+ list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+ list_del(&ef->del_list);
+ eventfs_remove(ef);
+ }
+
+ kfree(ei);
+ return;
+ }
mutex_lock(&eventfs_mutex);
+
ti_parent = get_tracefs(dentry->d_parent->d_inode);
if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
goto out;
return ret;
}
+struct dentry_list {
+ void *cursor;
+ struct dentry **dentries;
+};
+
/**
* eventfs_release - called to release eventfs file/dir
* @inode: inode to be released
static int eventfs_release(struct inode *inode, struct file *file)
{
struct tracefs_inode *ti;
- struct eventfs_inode *ei;
- struct eventfs_file *ef;
- struct dentry *dentry;
- int idx;
+ struct dentry_list *dlist = file->private_data;
+ void *cursor;
+ int i;
ti = get_tracefs(inode);
if (!(ti->flags & TRACEFS_EVENT_INODE))
return -EINVAL;
- ei = ti->private;
- idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
- srcu_read_lock_held(&eventfs_srcu)) {
- mutex_lock(&eventfs_mutex);
- dentry = ef->dentry;
- mutex_unlock(&eventfs_mutex);
- if (dentry)
- dput(dentry);
+ if (WARN_ON_ONCE(!dlist))
+ return -EINVAL;
+
+ for (i = 0; dlist->dentries && dlist->dentries[i]; i++) {
+ dput(dlist->dentries[i]);
}
- srcu_read_unlock(&eventfs_srcu, idx);
+
+ cursor = dlist->cursor;
+ kfree(dlist->dentries);
+ kfree(dlist);
+ file->private_data = cursor;
return dcache_dir_close(inode, file);
}
struct tracefs_inode *ti;
struct eventfs_inode *ei;
struct eventfs_file *ef;
+ struct dentry_list *dlist;
+ struct dentry **dentries = NULL;
struct dentry *dentry = file_dentry(file);
+ struct dentry *d;
struct inode *f_inode = file_inode(file);
+ int cnt = 0;
int idx;
+ int ret;
ti = get_tracefs(f_inode);
if (!(ti->flags & TRACEFS_EVENT_INODE))
return -EINVAL;
+ if (WARN_ON_ONCE(file->private_data))
+ return -EINVAL;
+
+ dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
+ if (!dlist)
+ return -ENOMEM;
+
ei = ti->private;
idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_rcu(ef, &ei->e_top_files, list) {
- create_dentry(ef, dentry, false);
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
+ d = create_dentry(ef, dentry, false);
+ if (d) {
+ struct dentry **tmp;
+
+ tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
+ if (!tmp)
+ break;
+ tmp[cnt] = d;
+ tmp[cnt + 1] = NULL;
+ cnt++;
+ dentries = tmp;
+ }
}
srcu_read_unlock(&eventfs_srcu, idx);
- return dcache_dir_open(inode, file);
+ ret = dcache_dir_open(inode, file);
+
+ /*
+ * dcache_dir_open() sets file->private_data to a dentry cursor.
+ * Need to save that but also save all the dentries that were
+ * opened by this function.
+ */
+ dlist->cursor = file->private_data;
+ dlist->dentries = dentries;
+ file->private_data = dlist;
+ return ret;
+}
+
+/*
+ * This just swaps file->private_data to the dentry cursor for
+ * dcache_readdir() and restores the dentry_list afterwards.
+ */
+static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx)
+{
+ struct dentry_list *dlist = file->private_data;
+ int ret;
+
+ file->private_data = dlist->cursor;
+ ret = dcache_readdir(file, ctx);
+ dlist->cursor = file->private_data;
+ file->private_data = dlist;
+ return ret;
}
/**
struct tracefs_inode *ti;
struct inode *inode;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (IS_ERR(dentry))
return dentry;
INIT_LIST_HEAD(&ei->e_top_files);
ti = get_tracefs(inode);
- ti->flags |= TRACEFS_EVENT_INODE;
+ ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
ti->private = ei;
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
struct eventfs_inode *ei_parent;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!parent)
return ERR_PTR(-EINVAL);
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!ef_parent)
return ERR_PTR(-EINVAL);
struct eventfs_inode *ei;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!parent)
return -EINVAL;
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!ef_parent)
return -EINVAL;
void eventfs_remove_events_dir(struct dentry *dentry)
{
struct tracefs_inode *ti;
- struct eventfs_inode *ei;
if (!dentry || !dentry->d_inode)
return;
if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
return;
- ei = ti->private;
d_invalidate(dentry);
dput(dentry);
- kfree(ei);
}
ti = get_tracefs(inode);
if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ef_status_free(dentry);
+ eventfs_set_ef_status_free(ti, dentry);
iput(inode);
}
*/
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
{
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
return __create_dir(name, parent, &simple_dir_inode_operations);
}
#define _TRACEFS_INTERNAL_H
enum {
- TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_TOP_INODE = BIT(2),
};
struct tracefs_inode {
struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
struct dentry *eventfs_failed_creating(struct dentry *dentry);
struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_set_ef_status_free(struct dentry *dentry);
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
bool "XFS online metadata check usage data collection"
default y
depends on XFS_ONLINE_SCRUB
- select FS_DEBUG
+ select XFS_DEBUG
help
If you say Y here, the kernel will gather usage data about
the online metadata check subsystem. This includes the number
#define xlog_check_buf_cancel_table(log) do { } while (0)
#endif
+/*
+ * Transform a regular reservation into one suitable for recovery of a log
+ * intent item.
+ *
+ * Intent recovery only runs a single step of the transaction chain and defers
+ * the rest to a separate transaction. Therefore, we reduce logcount to 1 here
+ * to avoid livelocks if the log grant space is nearly exhausted due to the
+ * recovered intent pinning the tail. Keep the same logflags to avoid tripping
+ * asserts elsewhere. Struct copies abound below.
+ */
+static inline struct xfs_trans_res
+xlog_recover_resv(const struct xfs_trans_res *r)
+{
+ struct xfs_trans_res ret = {
+ .tr_logres = r->tr_logres,
+ .tr_logcount = 1,
+ .tr_logflags = r->tr_logflags,
+ };
+
+ return ret;
+}
+
#endif /* __XFS_LOG_RECOVER_H__ */
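For reference, the recovery-side callers later in this series use the helper in this pattern (copied from the extent-free hunk below):

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);

i.e. the stock reservation is copied, its logcount dropped to 1, and the copy handed to xfs_trans_alloc() for the single recovery step.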
return -EFSCORRUPTED;
}
- if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ if (!xfs_is_readonly(mp) &&
+ xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
xfs_alert(mp,
"Corruption detected in superblock read-only compatible features (0x%x)!",
(sbp->sb_features_ro_compat &
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- /* If the mtime changes, then ctime must also change */
- ASSERT(flags & XFS_ICHGTIME_CHG);
+ tv = current_time(inode);
- tv = inode_set_ctime_current(inode);
if (flags & XFS_ICHGTIME_MOD)
inode->i_mtime = tv;
+ if (flags & XFS_ICHGTIME_CHG)
+ inode_set_ctime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CREATE)
ip->i_crtime = tv;
}
out_teardown:
error = xchk_teardown(sc, error);
out_sc:
+ if (error != -ENOENT)
+ xchk_stats_merge(mp, sm, &run);
kfree(sc);
out:
trace_xchk_done(XFS_I(file_inode(file)), sm, error);
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
error = 0;
}
- if (error != -ENOENT)
- xchk_stats_merge(mp, sm, &run);
return error;
need_drain:
error = xchk_teardown(sc, 0);
{
struct xchk_scrub_stats *css;
- ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
+ if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
+ ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
+ return;
+ }
css = &cs->cs_stats[sm->sm_type];
spin_lock(&css->css_lock);
int error = 0;
mp = dp->i_mount;
- ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
xfs_ilock(dp, lock_mode);
if (!xfs_inode_has_attr_fork(dp))
struct xfs_inode *ip;
struct xfs_da_args *args;
struct xfs_trans *tp;
- struct xfs_trans_res tres;
+ struct xfs_trans_res resv;
struct xfs_attri_log_format *attrp;
struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
int error;
goto out;
}
- xfs_init_attr_trans(args, &tres, &total);
- error = xfs_trans_alloc(mp, &tres, total, 0, XFS_TRANS_RESERVE, &tp);
+ xfs_init_attr_trans(args, &resv, &total);
+ resv = xlog_recover_resv(&resv);
+ error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp);
if (error)
goto out;
struct list_head *capture_list)
{
struct xfs_bmap_intent fake = { };
+ struct xfs_trans_res resv;
struct xfs_bui_log_item *buip = BUI_ITEM(lip);
struct xfs_trans *tp;
struct xfs_inode *ip = NULL;
return error;
/* Allocate transaction and do the work. */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv,
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
goto err_rele;
return ERR_PTR(error);
}
+ /*
+ * Reload the incore unlinked list to avoid failure in inodegc.
+ * Use an unlocked check here because unrecovered unlinked inodes
+ * should be somewhat rare.
+ */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked(ip);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ xfs_irele(ip);
+ return ERR_PTR(error);
+ }
+ }
+
if (VFS_I(ip)->i_generation != generation) {
xfs_irele(ip);
return ERR_PTR(-ESTALE);
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
struct xfs_mount *mp = lip->li_log->l_mp;
struct xfs_efd_log_item *efdp;
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
if (error)
return error;
efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
}
#endif /* CONFIG_XFS_RT */
+static inline bool
+rmap_not_shareable(struct xfs_mount *mp, const struct xfs_rmap_irec *r)
+{
+ if (!xfs_has_reflink(mp))
+ return true;
+ if (XFS_RMAP_NON_INODE_OWNER(r->rm_owner))
+ return true;
+ if (r->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
+ XFS_RMAP_UNWRITTEN))
+ return true;
+ return false;
+}
+
/* Execute a getfsmap query against the regular data device. */
STATIC int
__xfs_getfsmap_datadev(
* low to the fsmap low key and max out the high key to the end
* of the AG.
*/
- info->low.rm_startblock = XFS_FSB_TO_AGBNO(mp, start_fsb);
info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
if (error)
/* Adjust the low key if we are continuing from where we left off. */
if (info->low.rm_blockcount == 0) {
- /* empty */
- } else if (XFS_RMAP_NON_INODE_OWNER(info->low.rm_owner) ||
- (info->low.rm_flags & (XFS_RMAP_ATTR_FORK |
- XFS_RMAP_BMBT_BLOCK |
- XFS_RMAP_UNWRITTEN))) {
- info->low.rm_startblock += info->low.rm_blockcount;
+ /* No previous record from which to continue */
+ } else if (rmap_not_shareable(mp, &info->low)) {
+ /* Last record seen was an unshareable extent */
info->low.rm_owner = 0;
info->low.rm_offset = 0;
if (XFS_FSB_TO_DADDR(mp, start_fsb) >= eofs)
return 0;
} else {
+ /* Last record seen was a shareable file data extent */
info->low.rm_offset += info->low.rm_blockcount;
}
+ info->low.rm_startblock = XFS_FSB_TO_AGBNO(mp, start_fsb);
info->high.rm_startblock = -1U;
info->high.rm_owner = ULLONG_MAX;
INIT_LIST_HEAD(&ip->i_ioend_list);
spin_lock_init(&ip->i_ioend_lock);
ip->i_next_unlinked = NULLAGINO;
- ip->i_prev_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = 0;
return ip;
}
int cpu;
bool ret = false;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list)) {
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
int error = 0;
flush_workqueue(mp->m_inodegc_wq);
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
struct xfs_inodegc *gc;
gc = per_cpu_ptr(mp->m_inodegc, cpu);
struct xfs_inodegc, work);
struct llist_node *node = llist_del_all(&gc->list);
struct xfs_inode *ip, *n;
+ struct xfs_mount *mp = gc->mp;
unsigned int nofs_flag;
- ASSERT(gc->cpu == smp_processor_id());
+ /*
+ * Clear the cpu mask bit and ensure that we have seen the latest
+ * update of the gc structure associated with this CPU. This matches
+ * with the release semantics used when setting the cpumask bit in
+ * xfs_inodegc_queue.
+ */
+ cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
+ smp_mb__after_atomic();
WRITE_ONCE(gc->items, 0);
nofs_flag = memalloc_nofs_save();
ip = llist_entry(node, struct xfs_inode, i_gclist);
- trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
+ trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
WRITE_ONCE(gc->shrinker_hits, 0);
llist_for_each_entry_safe(ip, n, node, i_gclist) {
struct xfs_inodegc *gc;
int items;
unsigned int shrinker_hits;
+ unsigned int cpu_nr;
unsigned long queue_delay = 1;
trace_xfs_inode_set_need_inactive(ip);
ip->i_flags |= XFS_NEED_INACTIVE;
spin_unlock(&ip->i_flags_lock);
- gc = get_cpu_ptr(mp->m_inodegc);
+ cpu_nr = get_cpu();
+ gc = this_cpu_ptr(mp->m_inodegc);
llist_add(&ip->i_gclist, &gc->list);
items = READ_ONCE(gc->items);
WRITE_ONCE(gc->items, items + 1);
shrinker_hits = READ_ONCE(gc->shrinker_hits);
/*
+ * Ensure the list add is always seen by anyone who finds the cpumask
+ * bit set. This effectively gives the cpumask bit set operation
+ * release ordering semantics.
+ */
+ smp_mb__before_atomic();
+ if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
+ cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
+
+ /*
* We queue the work while holding the current CPU so that the work
* is scheduled to run on this CPU.
*/
if (!xfs_is_inodegc_enabled(mp)) {
- put_cpu_ptr(gc);
+ put_cpu();
return;
}
trace_xfs_inodegc_queue(mp, __return_address);
mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
queue_delay);
- put_cpu_ptr(gc);
+ put_cpu();
if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
trace_xfs_inodegc_throttle(mp, __return_address);
}
/*
- * Fold the dead CPU inodegc queue into the current CPUs queue.
- */
-void
-xfs_inodegc_cpu_dead(
- struct xfs_mount *mp,
- unsigned int dead_cpu)
-{
- struct xfs_inodegc *dead_gc, *gc;
- struct llist_node *first, *last;
- unsigned int count = 0;
-
- dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
- cancel_delayed_work_sync(&dead_gc->work);
-
- if (llist_empty(&dead_gc->list))
- return;
-
- first = dead_gc->list.first;
- last = first;
- while (last->next) {
- last = last->next;
- count++;
- }
- dead_gc->list.first = NULL;
- dead_gc->items = 0;
-
- /* Add pending work to current CPU */
- gc = get_cpu_ptr(mp->m_inodegc);
- llist_add_batch(first, last, &gc->list);
- count += READ_ONCE(gc->items);
- WRITE_ONCE(gc->items, count);
-
- if (xfs_is_inodegc_enabled(mp)) {
- trace_xfs_inodegc_queue(mp, __return_address);
- mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
- 0);
- }
- put_cpu_ptr(gc);
-}
-
-/*
* We set the inode flag atomically with the radix tree tag. Once we get tag
* lookups on the radix tree, this inode flag can go away.
*
if (!xfs_is_inodegc_enabled(mp))
return 0;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list))
return XFS_INODEGC_SHRINKER_COUNT;
trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list)) {
unsigned int h = READ_ONCE(gc->shrinker_hits);
int xfs_inodegc_flush(struct xfs_mount *mp);
void xfs_inodegc_stop(struct xfs_mount *mp);
void xfs_inodegc_start(struct xfs_mount *mp);
-void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
int xfs_inodegc_register_shrinker(struct xfs_mount *mp);
#endif
if (VFS_I(ip)->i_mode == 0)
return false;
- /* If this is a read-only mount, don't do this (would generate I/O) */
- if (xfs_is_readonly(mp))
+ /*
+ * If this is a read-only mount, don't do this (would generate I/O)
+ * unless we're in log recovery and cleaning the iunlinked list.
+ */
+ if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
return false;
/* If the log isn't running, push inodes straight to reclaim. */
mp = ip->i_mount;
ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
- /* If this is a read-only mount, don't do this (would generate I/O) */
- if (xfs_is_readonly(mp))
+ /*
+ * If this is a read-only mount, don't do this (would generate I/O)
+ * unless we're in log recovery and cleaning the iunlinked list.
+ */
+ if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
goto out;
/* Metadata inodes require explicit resource cleanup. */
ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
truncate = 1;
- error = xfs_qm_dqattach(ip);
- if (error)
- goto out;
+ if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
+ /*
+ * If this inode is being inactivated during a quotacheck and
+ * has not yet been scanned by quotacheck, we /must/ remove
+ * the dquots from the inode before inactivation changes the
+ * block and inode counts. Most probably this is a result of
+ * reloading the incore iunlinked list to purge unrecovered
+ * unlinked inodes.
+ */
+ xfs_qm_dqdetach(ip);
+ } else {
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ goto out;
+ }
if (S_ISLNK(VFS_I(ip)->i_mode))
error = xfs_inactive_symlink(ip);
rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
+ if (!ip) {
+ /* Caller can handle inode not being in memory. */
+ rcu_read_unlock();
+ return NULL;
+ }
/*
- * Inode not in memory or in RCU freeing limbo should not happen.
- * Warn about this and let the caller handle the failure.
+ * Inode in RCU freeing limbo should not happen. Warn about this and
+ * let the caller handle the failure.
*/
- if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
+ if (WARN_ON_ONCE(!ip->i_ino)) {
rcu_read_unlock();
return NULL;
}
return ip;
}
-/* Update the prev pointer of the next agino. */
+/*
+ * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
+ * is not in cache.
+ */
static int
xfs_iunlink_update_backref(
struct xfs_perag *pag,
ip = xfs_iunlink_lookup(pag, next_agino);
if (!ip)
- return -EFSCORRUPTED;
+ return -ENOLINK;
+
ip->i_prev_unlinked = prev_agino;
return 0;
}
return 0;
}
+/*
+ * Load the inode @next_agino into the cache and set its prev_unlinked pointer
+ * to @prev_agino. Caller must hold the AGI to synchronize with other changes
+ * to the unlinked list.
+ */
+STATIC int
+xfs_iunlink_reload_next(
+ struct xfs_trans *tp,
+ struct xfs_buf *agibp,
+ xfs_agino_t prev_agino,
+ xfs_agino_t next_agino)
+{
+ struct xfs_perag *pag = agibp->b_pag;
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_inode *next_ip = NULL;
+ xfs_ino_t ino;
+ int error;
+
+ ASSERT(next_agino != NULLAGINO);
+
+#ifdef DEBUG
+ rcu_read_lock();
+ next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
+ ASSERT(next_ip == NULL);
+ rcu_read_unlock();
+#endif
+
+ xfs_info_ratelimited(mp,
+ "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
+ next_agino, pag->pag_agno);
+
+ /*
+ * Use an untrusted lookup just to be cautious in case the AGI has been
+ * corrupted and now points at a free inode. That shouldn't happen,
+ * but we'd rather shut down now since we're already running in a weird
+ * situation.
+ */
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
+ error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
+ if (error)
+ return error;
+
+ /* If this is not an unlinked inode, something is very wrong. */
+ if (VFS_I(next_ip)->i_nlink != 0) {
+ error = -EFSCORRUPTED;
+ goto rele;
+ }
+
+ next_ip->i_prev_unlinked = prev_agino;
+ trace_xfs_iunlink_reload_next(next_ip);
+rele:
+ ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
+ if (xfs_is_quotacheck_running(mp) && next_ip)
+ xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
+ xfs_irele(next_ip);
+ return error;
+}
+
static int
xfs_iunlink_insert_inode(
struct xfs_trans *tp,
* inode.
*/
error = xfs_iunlink_update_backref(pag, agino, next_agino);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
if (error)
return error;
}
/* Point the head of the list to point to this inode. */
+ ip->i_prev_unlinked = NULLAGINO;
return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
}
*/
error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
ip->i_next_unlinked);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
+ ip->i_next_unlinked);
if (error)
return error;
}
ip->i_next_unlinked = NULLAGINO;
- ip->i_prev_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = 0;
return error;
}
if (ip1 != ip2)
inode_unlock(VFS_I(ip1));
}
+
+/*
+ * Reload the incore unlinked list for this inode's bucket. Caller should ensure that
+ * the link count cannot change, either by taking ILOCK_SHARED or otherwise
+ * preventing other threads from executing.
+ */
+int
+xfs_inode_reload_unlinked_bucket(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *agibp;
+ struct xfs_agi *agi;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ xfs_agino_t prev_agino, next_agino;
+ unsigned int bucket;
+ bool foundit = false;
+ int error;
+
+ /* Grab the first inode in the list */
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_ialloc_read_agi(pag, tp, &agibp);
+ xfs_perag_put(pag);
+ if (error)
+ return error;
+
+ /*
+ * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
+ * incore unlinked list pointers for this inode. Check once more to
+ * see if we raced with anyone else to reload the unlinked list.
+ */
+ if (!xfs_inode_unlinked_incomplete(ip)) {
+ foundit = true;
+ goto out_agibp;
+ }
+
+ bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
+ agi = agibp->b_addr;
+
+ trace_xfs_inode_reload_unlinked_bucket(ip);
+
+ xfs_info_ratelimited(mp,
+ "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
+ agino, agno);
+
+ prev_agino = NULLAGINO;
+ next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
+ while (next_agino != NULLAGINO) {
+ struct xfs_inode *next_ip = NULL;
+
+ /* Found this caller's inode, set its backlink. */
+ if (next_agino == agino) {
+ next_ip = ip;
+ next_ip->i_prev_unlinked = prev_agino;
+ foundit = true;
+ goto next_inode;
+ }
+
+ /* Try in-memory lookup first. */
+ next_ip = xfs_iunlink_lookup(pag, next_agino);
+ if (next_ip)
+ goto next_inode;
+
+ /* Inode not in memory, try reloading it. */
+ error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
+ next_agino);
+ if (error)
+ break;
+
+ /* Grab the reloaded inode. */
+ next_ip = xfs_iunlink_lookup(pag, next_agino);
+ if (!next_ip) {
+ /* No incore inode at all? We reloaded it... */
+ ASSERT(next_ip != NULL);
+ error = -EFSCORRUPTED;
+ break;
+ }
+
+next_inode:
+ prev_agino = next_agino;
+ next_agino = next_ip->i_next_unlinked;
+ }
+
+out_agibp:
+ xfs_trans_brelse(tp, agibp);
+ /* Should have found this inode somewhere in the iunlinked bucket. */
+ if (!error && !foundit)
+ error = -EFSCORRUPTED;
+ return error;
+}
+
+/* Decide if this inode is missing its unlinked list and reload it. */
+int
+xfs_inode_reload_unlinked(
+ struct xfs_inode *ip)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc_empty(ip->i_mount, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ if (xfs_inode_unlinked_incomplete(ip))
+ error = xfs_inode_reload_unlinked_bucket(tp, ip);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ xfs_trans_cancel(tp);
+
+ return error;
+}
uint64_t i_diflags2; /* XFS_DIFLAG2_... */
struct timespec64 i_crtime; /* time created */
- /* unlinked list pointers */
+ /*
+ * Unlinked list pointers. These point to the next and previous inodes
+ * in the AGI unlinked bucket list, respectively. These fields can
+ * only be updated with the AGI locked.
+ *
+ * i_next_unlinked caches di_next_unlinked.
+ */
xfs_agino_t i_next_unlinked;
+
+ /*
+ * If the inode is not on an unlinked list, this field is zero. If the
+ * inode is the first element in an unlinked list, this field is
+ * NULLAGINO. Otherwise, i_prev_unlinked points to the previous inode
+ * in the unlinked list.
+ */
xfs_agino_t i_prev_unlinked;
/* VFS inode */
struct list_head i_ioend_list;
} xfs_inode_t;
+static inline bool xfs_inode_on_unlinked_list(const struct xfs_inode *ip)
+{
+ return ip->i_prev_unlinked != 0;
+}
+
static inline bool xfs_inode_has_attr_fork(struct xfs_inode *ip)
{
return ip->i_forkoff > 0;
*/
#define XFS_INACTIVATING (1 << 13)
+/* Quotacheck is running but inode has not been added to quota counts. */
+#define XFS_IQUOTAUNCHECKED (1 << 14)
+
/* All inode state flags related to inode reclaim. */
#define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \
XFS_IRECLAIM | \
#define XFS_IRECLAIM_RESET_FLAGS \
(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | XFS_NEED_INACTIVE | \
- XFS_INACTIVATING)
+ XFS_INACTIVATING | XFS_IQUOTAUNCHECKED)
/*
* Flags for inode locking.
int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+static inline bool
+xfs_inode_unlinked_incomplete(
+ struct xfs_inode *ip)
+{
+ return VFS_I(ip)->i_nlink == 0 && !xfs_inode_on_unlinked_list(ip);
+}
+int xfs_inode_reload_unlinked_bucket(struct xfs_trans *tp, struct xfs_inode *ip);
+int xfs_inode_reload_unlinked(struct xfs_inode *ip);
+
#endif /* __XFS_INODE_H__ */
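Putting the xfs_inode.h changes together (my summary, not text from the patch): i_prev_unlinked == 0 now means the inode is on no unlinked list, NULLAGINO marks the head of a bucket, and any other value is the previous inode's agino. xfs_inode_unlinked_incomplete() therefore flags an inode whose link count is zero but whose incore unlinked linkage was never rebuilt, and the reload helpers above are run on such inodes before they are inactivated or reused.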
stat->gid = vfsgid_into_kgid(vfsgid);
stat->ino = ip->i_ino;
stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode_get_ctime(inode);
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
- fill_mg_cmtime(stat, request_mask, inode);
-
if (xfs_has_v3inodes(mp)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
if (newsize != oldsize &&
!(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
iattr->ia_ctime = iattr->ia_mtime =
- current_mgtime(inode);
+ current_time(inode);
iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
}
if (error)
goto out;
+ /* Reload the incore unlinked list to avoid failure in inodegc. */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked_bucket(tp, ip);
+ if (error) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ xfs_irele(ip);
+ return error;
+ }
+ }
+
ASSERT(ip != NULL);
ASSERT(ip->i_imap.im_blkno != 0);
inode = VFS_I(ip);
* just worked.
*/
if (!xfs_has_norecovery(mp)) {
- /*
- * log recovery ignores readonly state and so we need to clear
- * mount-based read only state so it can write to disk.
- */
- bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
- &mp->m_opstate);
error = xlog_recover(log);
- if (readonly)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
if (error) {
xfs_warn(mp, "log mount/recovery failed: error %d",
error);
struct xfs_mount *mp)
{
struct xlog *log = mp->m_log;
- bool readonly;
int error = 0;
if (xfs_has_norecovery(mp)) {
}
/*
- * log recovery ignores readonly state and so we need to clear
- * mount-based read only state so it can write to disk.
- */
- readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
-
- /*
* During the second phase of log recovery, we need iget and
* iput to behave like they do for an active filesystem.
* xfs_fs_drop_inode needs to be able to prevent the deletion
xfs_buftarg_drain(mp->m_ddev_targp);
clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
- if (readonly)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
/* Make sure the log is dead if we're returning failure. */
ASSERT(!error || xlog_is_shutdown(log));
struct xlog_cil_pcp *cilpcp;
int cpu;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &ctx->cil_pcpmask) {
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
ctx->ticket->t_curr_res += cilpcp->space_reserved;
if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
return;
- for_each_online_cpu(cpu) {
+ /*
+ * We can race with other cpus setting cil_pcpmask. However, we've
+ * atomically cleared PCP_SPACE which forces other threads to add to
+ * the global space used count. cil_pcpmask is a superset of cilpcp
+ * structures that could have a nonzero space_used.
+ */
+ for_each_cpu(cpu, &ctx->cil_pcpmask) {
int old, prev;
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
int iovhdr_res = 0, split_res = 0, ctx_res = 0;
int space_used;
int order;
+ unsigned int cpu_nr;
struct xlog_cil_pcp *cilpcp;
ASSERT(tp);
* can't be scheduled away between split sample/update operations that
* are done without outside locking to serialise them.
*/
- cilpcp = get_cpu_ptr(cil->xc_pcp);
+ cpu_nr = get_cpu();
+ cilpcp = this_cpu_ptr(cil->xc_pcp);
+
+ /* Tell the future push that there was work added by this CPU. */
+ if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
+ cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
/*
* We need to take the CIL checkpoint unit reservation on the first
continue;
list_add_tail(&lip->li_cil, &cilpcp->log_items);
}
- put_cpu_ptr(cilpcp);
+ put_cpu();
/*
* If we've overrun the reservation, dump the tx details before we move
}
/*
- * Move dead percpu state to the relevant CIL context structures.
- *
- * We have to lock the CIL context here to ensure that nothing is modifying
- * the percpu state, either addition or removal. Both of these are done under
- * the CIL context lock, so grabbing that exclusively here will ensure we can
- * safely drain the cilpcp for the CPU that is dying.
- */
-void
-xlog_cil_pcp_dead(
- struct xlog *log,
- unsigned int cpu)
-{
- struct xfs_cil *cil = log->l_cilp;
- struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
- struct xfs_cil_ctx *ctx;
-
- down_write(&cil->xc_ctx_lock);
- ctx = cil->xc_ctx;
- if (ctx->ticket)
- ctx->ticket->t_curr_res += cilpcp->space_reserved;
- cilpcp->space_reserved = 0;
-
- if (!list_empty(&cilpcp->log_items))
- list_splice_init(&cilpcp->log_items, &ctx->log_items);
- if (!list_empty(&cilpcp->busy_extents))
- list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
- atomic_add(cilpcp->space_used, &ctx->space_used);
- cilpcp->space_used = 0;
- up_write(&cil->xc_ctx_lock);
-}
-
-/*
* Perform initial CIL structure initialisation.
*/
int
struct work_struct discard_endio_work;
struct work_struct push_work;
atomic_t order_id;
+
+ /*
+ * CPUs that could have added items to the percpu CIL data. Access is
+ * coordinated with xc_ctx_lock.
+ */
+ struct cpumask cil_pcpmask;
};
/*
wait_queue_head_t xc_push_wait; /* background push throttle */
void __percpu *xc_pcp; /* percpu CIL structures */
-#ifdef CONFIG_HOTPLUG_CPU
- struct list_head xc_pcp_list;
-#endif
} ____cacheline_aligned_in_smp;
/* xc_flags bit values */
return p;
}
-/*
- * CIL CPU dead notifier
- */
-void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);
-
#endif /* __XFS_LOG_PRIV_H__ */
* try a smaller size. We need to be able to read at least
* a log sector, or we're out of luck.
*/
- bufblks = 1 << ffs(nbblks);
+ bufblks = roundup_pow_of_two(nbblks);
while (bufblks > log->l_logBBsize)
bufblks >>= 1;
while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
* a smaller size. We need to be able to write at least a
* log sector, or we're out of luck.
*/
- bufblks = 1 << ffs(blocks);
+ bufblks = roundup_pow_of_two(blocks);
while (bufblks > log->l_logBBsize)
bufblks >>= 1;
while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
* Per-cpu deferred inode inactivation GC lists.
*/
struct xfs_inodegc {
+ struct xfs_mount *mp;
struct llist_head list;
struct delayed_work work;
int error;
/* approximate count of inodes in the list */
unsigned int items;
unsigned int shrinker_hits;
-#if defined(DEBUG) || defined(XFS_WARN)
unsigned int cpu;
-#endif
};
/*
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
- struct list_head m_mount_list; /* global mount list */
void __percpu *m_inodegc; /* percpu inodegc structures */
/*
unsigned int *m_errortag;
struct xfs_kobj m_errortag_kobj;
#endif
+
+ /* cpus that have inodes queued for inactivation */
+ struct cpumask m_inodegc_cpumask;
} xfs_mount_t;
#define M_IGEO(mp) (&(mp)->m_ino_geo)
#define XFS_OPSTATE_WARNED_SHRINK 8
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP 9
+/* Mount time quotacheck is running */
+#define XFS_OPSTATE_QUOTACHECK_RUNNING 10
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
+#ifdef CONFIG_XFS_QUOTA
+__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
+#else
+# define xfs_is_quotacheck_running(mp) (false)
+#endif
static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \
{ (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
{ (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
- { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }
+ { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \
+ { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }
/*
* Max and min values for mount-option defined I/O
if (error)
return error;
+ /*
+ * Reload the incore unlinked list to avoid failure in inodegc.
+ * Use an unlocked check here because unrecovered unlinked inodes
+ * should be somewhat rare.
+ */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked(ip);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ goto error0;
+ }
+ }
+
ASSERT(ip->i_delayed_blks == 0);
if (XFS_IS_REALTIME_INODE(ip)) {
}
nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
+ xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
/*
* Add the (disk blocks and inode) resources occupied by this
flags |= XFS_PQUOTA_CHKD;
}
+ xfs_set_quotacheck_running(mp);
error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
NULL);
+ xfs_clear_quotacheck_running(mp);
/*
* On error, the inode walk may have partially populated the dquot
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
struct xfs_cud_log_item *cudp;
struct xfs_trans *tp;
* doesn't fit. We need to reserve enough blocks to handle a
* full btree split on either end of the refcount range.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
- mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
+ XFS_TRANS_RESERVE, &tp);
if (error)
return error;
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
struct xfs_rud_log_item *rudp;
struct xfs_trans *tp;
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
- mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
+ XFS_TRANS_RESERVE, &tp);
if (error)
return error;
rudp = xfs_trans_get_rud(tp, ruip);
static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-static LIST_HEAD(xfs_mount_list);
-static DEFINE_SPINLOCK(xfs_mount_list_lock);
-
-static inline void xfs_mount_list_add(struct xfs_mount *mp)
-{
- spin_lock(&xfs_mount_list_lock);
- list_add(&mp->m_mount_list, &xfs_mount_list);
- spin_unlock(&xfs_mount_list_lock);
-}
-
-static inline void xfs_mount_list_del(struct xfs_mount *mp)
-{
- spin_lock(&xfs_mount_list_lock);
- list_del(&mp->m_mount_list);
- spin_unlock(&xfs_mount_list_lock);
-}
-#else /* !CONFIG_HOTPLUG_CPU */
-static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
-static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
-#endif
-
enum xfs_dax_mode {
XFS_DAX_INODE = 0,
XFS_DAX_ALWAYS = 1,
for_each_possible_cpu(cpu) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
-#if defined(DEBUG) || defined(XFS_WARN)
gc->cpu = cpu;
-#endif
+ gc->mp = mp;
init_llist_head(&gc->list);
gc->items = 0;
gc->error = 0;
xfs_freesb(mp);
xchk_mount_stats_free(mp);
free_percpu(mp->m_stats.xs_stats);
- xfs_mount_list_del(mp);
xfs_inodegc_free_percpu(mp);
xfs_destroy_percpu_counters(mp);
xfs_destroy_mount_workqueues(mp);
if (error)
goto out_destroy_counters;
- /*
- * All percpu data structures requiring cleanup when a cpu goes offline
- * must be allocated before adding this @mp to the cpu-dead handler's
- * mount list.
- */
- xfs_mount_list_add(mp);
-
/* Allocate stats memory before we do operations that might use it */
mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
if (!mp->m_stats.xs_stats) {
out_free_stats:
free_percpu(mp->m_stats.xs_stats);
out_destroy_inodegc:
- xfs_mount_list_del(mp);
xfs_inodegc_free_percpu(mp);
out_destroy_counters:
xfs_destroy_percpu_counters(mp);
.init_fs_context = xfs_init_fs_context,
.parameters = xfs_fs_parameters,
.kill_sb = xfs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
destroy_workqueue(xfs_alloc_wq);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int
-xfs_cpu_dead(
- unsigned int cpu)
-{
- struct xfs_mount *mp, *n;
-
- spin_lock(&xfs_mount_list_lock);
- list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
- spin_unlock(&xfs_mount_list_lock);
- xfs_inodegc_cpu_dead(mp, cpu);
- xlog_cil_pcp_dead(mp->m_log, cpu);
- spin_lock(&xfs_mount_list_lock);
- }
- spin_unlock(&xfs_mount_list_lock);
- return 0;
-}
-
-static int __init
-xfs_cpu_hotplug_init(void)
-{
- int error;
-
- error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
- xfs_cpu_dead);
- if (error < 0)
- xfs_alert(NULL,
-"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
- error);
- return error;
-}
-
-static void
-xfs_cpu_hotplug_destroy(void)
-{
- cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-static inline int xfs_cpu_hotplug_init(void) { return 0; }
-static inline void xfs_cpu_hotplug_destroy(void) {}
-#endif
-
STATIC int __init
init_xfs_fs(void)
{
xfs_dir_startup();
- error = xfs_cpu_hotplug_init();
- if (error)
- goto out;
-
error = xfs_init_caches();
if (error)
- goto out_destroy_hp;
+ goto out;
error = xfs_init_workqueues();
if (error)
xfs_destroy_workqueues();
out_destroy_caches:
xfs_destroy_caches();
- out_destroy_hp:
- xfs_cpu_hotplug_destroy();
out:
return error;
}
xfs_destroy_workqueues();
xfs_destroy_caches();
xfs_uuid_table_free();
- xfs_cpu_hotplug_destroy();
}
module_init(init_xfs_fs);
__entry->new_ptr)
);
+TRACE_EVENT(xfs_iunlink_reload_next,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ __field(xfs_agino_t, prev_agino)
+ __field(xfs_agino_t, next_agino)
+ ),
+ TP_fast_assign(
+ __entry->dev = ip->i_mount->m_super->s_dev;
+ __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
+ __entry->prev_agino = ip->i_prev_unlinked;
+ __entry->next_agino = ip->i_next_unlinked;
+ ),
+ TP_printk("dev %d:%d agno 0x%x agino 0x%x prev_unlinked 0x%x next_unlinked 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agino,
+ __entry->prev_agino,
+ __entry->next_agino)
+);
+
+TRACE_EVENT(xfs_inode_reload_unlinked_bucket,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ ),
+ TP_fast_assign(
+ __entry->dev = ip->i_mount->m_super->s_dev;
+ __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
+ ),
+ TP_printk("dev %d:%d agno 0x%x agino 0x%x bucket %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agino,
+ __entry->agino % XFS_AGI_UNLINKED_BUCKETS)
+);
+
DECLARE_EVENT_CLASS(xfs_ag_inode_class,
TP_PROTO(struct xfs_inode *ip),
TP_ARGS(ip),
if (xfs_sb_version_haslogxattrs(&mp->m_sb))
return 0;
+ /*
+ * Check if the filesystem featureset is new enough to set this log
+ * incompat feature bit. Strictly speaking, the minimum requirement is
+ * a V5 filesystem for the superblock field, but we'll require rmap
+ * or reflink to avoid having to deal with really old kernels.
+ */
+ if (!xfs_has_reflink(mp) && !xfs_has_rmapbt(mp)) {
+ error = -EOPNOTSUPP;
+ goto drop_incompat;
+ }
+
/* Enable log-assisted xattrs. */
error = xfs_add_incompat_log_feature(mp,
XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
-#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
};
/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
* drm_exec_for_each_locked_object - iterate over all the locked objects
* @exec: drm_exec object
* @index: unsigned long index for the iteration
*
* Iterate over all the locked GEM objects inside the drm_exec object.
*/
-#define drm_exec_for_each_locked_object(exec, index, obj) \
- for (index = 0, obj = (exec)->objects[0]; \
- index < (exec)->num_objects; \
- ++index, obj = (exec)->objects[index])
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
/**
* drm_exec_until_all_locked - loop until all GEM objects are locked
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <linux/device.h>
+
#include <kunit/test.h>
struct drm_device;
{
struct drm_driver *driver;
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);
driver->driver_features = features;
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
#define ARM_SMCCC_VERSION_1_3 0x10003
#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
{
#if defined(arch_atomic_read_acquire)
return arch_atomic_read_acquire(v);
-#elif defined(arch_atomic_read)
- return arch_atomic_read(v);
#else
int ret;
{
#if defined(arch_atomic_set_release)
arch_atomic_set_release(v, i);
-#elif defined(arch_atomic_set)
- arch_atomic_set(v, i);
#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
{
#if defined(arch_atomic64_read_acquire)
return arch_atomic64_read_acquire(v);
-#elif defined(arch_atomic64_read)
- return arch_atomic64_read(v);
#else
s64 ret;
{
#if defined(arch_atomic64_set_release)
arch_atomic64_set_release(v, i);
-#elif defined(arch_atomic64_set)
- arch_atomic64_set(v, i);
#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
+// 2fdd6702823fa842f9cea57a002e6e4476ae780c
____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
} __attribute__ ((packed));
union ceph_mds_request_args_ext {
- union {
- union ceph_mds_request_args old;
- struct {
- __le32 mode;
- __le32 uid;
- __le32 gid;
- struct ceph_timespec mtime;
- struct ceph_timespec atime;
- __le64 size, old_size; /* old_size needed by truncate */
- __le32 mask; /* CEPH_SETATTR_* */
- struct ceph_timespec btime;
- } __attribute__ ((packed)) setattr_ext;
- };
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
};
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
- CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
kgid_has_mapping(fs_userns, kgid);
}
-struct timespec64 current_mgtime(struct inode *inode);
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/*
- * Multigrain timestamps
- *
- * Conditionally use fine-grained ctime and mtime timestamps when there
- * are users actively observing them via getattr. The primary use-case
- * for this is NFS clients that use the ctime to distinguish between
- * different states of the file, and that are often fooled by multiple
- * operations that occur in the same coarse-grained timer tick.
- *
- * The kernel always keeps normalized struct timespec64 values in the ctime,
- * which means that only the first 30 bits of the value are used. Use the
- * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value
- * has been queried since it was last updated.
- */
-#define I_CTIME_QUERIED (1L<<30)
-
/**
* inode_get_ctime - fetch the current ctime from the inode
* @inode: inode from which to fetch ctime
*
- * Grab the current ctime tv_nsec field from the inode, mask off the
- * I_CTIME_QUERIED flag and return it. This is mostly intended for use by
- * internal consumers of the ctime that aren't concerned with ensuring a
- * fine-grained update on the next change (e.g. when preparing to store
- * the value in the backing store for later retrieval).
- *
- * This is safe to call regardless of whether the underlying filesystem
- * is using multigrain timestamps.
+ * Grab the current ctime from the inode and return it.
*/
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- struct timespec64 ctime;
-
- ctime.tv_sec = inode->__i_ctime.tv_sec;
- ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED;
-
- return ctime;
+ return inode->__i_ctime;
}
/**
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-/**
- * is_mgtime: is this inode using multigrain timestamps
- * @inode: inode to test for multigrain timestamps
- *
- * Return true if the inode uses multigrain timestamps, false otherwise.
- */
-static inline bool is_mgtime(const struct inode *inode)
-{
- return inode->i_sb->s_type->fs_flags & FS_MGTIME;
-}
-
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
}
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding
- * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
struct i2c_driver {
unsigned int class;
- union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client);
- /*
- * Legacy callback that was part of a conversion of .probe().
- * Today it has the same semantic as .probe(). Don't use for new
- * code.
- */
- int (*probe_new)(struct i2c_client *client);
- };
+ int (*probe)(struct i2c_client *client);
void (*remove)(struct i2c_client *client);
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
+#include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
* 2) rcu_report_dead() reports the final quiescent states.
*
* _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
*/
-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
- ATA_EH_GET_SUCCESS_SENSE,
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
#define MAS_ROOT ((struct maple_enode *)5UL)
#define MAS_NONE ((struct maple_enode *)9UL)
#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MAS_OVERFLOW ((struct maple_enode *)33UL)
+#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
return mas->node == MAS_PAUSE;
}
+/* Check if the mas is pointing to a node or not */
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
+ return true;
+
+ return false;
+}
+
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
rcu_read_unlock();
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
struct nvmefc_ls_req {
void *rqstaddr;
dma_addr_t rqstdma;
- __le32 rqstlen;
+ u32 rqstlen;
void *rspaddr;
dma_addr_t rspdma;
- __le32 rsplen;
+ u32 rsplen;
u32 timeout;
void *private;
struct nvmefc_ls_rsp {
void *rspbuf;
dma_addr_t rspdma;
- __le32 rsplen;
+ u16 rsplen;
void (*done)(struct nvmefc_ls_rsp *rsp);
void *nvme_fc_private; /* LLDD is not to access !! */
#endif
#ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
/**
* set_ptes - Map consecutive pages to a contiguous range of addresses.
* @mm: Address space to map the pages into.
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+ pte = pte_next_pfn(pte);
}
arch_leave_lazy_mmu_mode();
}
}
#endif
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p)));
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
+ p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
if (array == NULL)
if (!mem)
return false;
- if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
- /* Pairs with smp_wmb() in swiotlb_find_slots() and
- * swiotlb_dyn_alloc(), which modify the RCU lists.
- */
- smp_rmb();
- return swiotlb_find_pool(dev, paddr);
- }
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, struct thermal_trip *,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
struct eventfs_file *ef;
- struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;
struct net *net;
struct nft_set *set;
u32 seq;
- u8 count;
+ u16 count;
void *priv[NFT_TRANS_GC_BATCHCOUNT];
struct rcu_head rcu;
};
void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
- unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
void nft_setelem_data_deactivate(const struct net *net,
const struct nft_set *set,
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
+#define SCSI_SPC_4 7
+#define SCSI_SPC_5 8
+#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS
* pass settings from slave_alloc to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
+
+ bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
+ bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned allow_restart:1; /* issue START_UNIT in error handler */
- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct scsi_device *);
+extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
struct TAG { MEMBERS } ATTRS NAME; \
}
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
#ifndef __counted_by
#define __counted_by(m)
#endif
+
+#endif /* _UAPI_LINUX_STDDEF_H */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
evtchn_port_t evtchn_from_irq(unsigned irq);
int xen_set_callback_via(uint64_t via);
-void xen_evtchn_do_upcall(struct pt_regs *regs);
-int xen_hvm_evtchn_do_upcall(void);
+int xen_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
const char __user *oldf, *newf;
- if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
memcpy(async_msg, kmsg, sizeof(*kmsg));
if (async_msg->msg.msg_name)
async_msg->msg.msg_name = &async_msg->addr;
+
+ if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
+ return -EAGAIN;
+
/* if were using fast_iov, set it to the new one */
if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
struct io_async_msghdr *iomsg)
{
iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->msg.msg_iter.nr_segs = 0;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
tname = btf_name_by_offset(btf, walk_type->name_off);
ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
- if (ret < 0)
+ if (ret >= sizeof(safe_tname))
return false;
safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
* to descendants
* @cgrp: The cgroup which descendants to traverse
* @link: A link for which to replace BPF program
- * @type: Type of attach operation
+ * @new_prog: &struct bpf_prog for the target BPF program with its refcnt
+ * incremented
*
* Must be called with cgroup_mutex held.
*/
* __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
* @sk: The socket sending or receiving traffic
* @skb: The skb that is being sent or received
- * @type: The type of program to be executed
+ * @atype: The type of program to be executed
*
* If no socket is passed, or the socket is not of type INET or INET6,
* this function does nothing and returns 0.
/**
* __cgroup_bpf_run_filter_sk() - Run a program on a sock
* @sk: sock structure to manipulate
- * @type: The type of program to be executed
+ * @atype: The type of program to be executed
*
* socket is passed is expected to be of type INET or INET6.
*
* provided by user sockaddr
* @sk: sock struct that will use sockaddr
* @uaddr: sockaddr struct provided by user
- * @type: The type of program to be executed
+ * @atype: The type of program to be executed
* @t_ctx: Pointer to attach type specific context
* @flags: Pointer to u32 which contains higher bits of BPF program
* return value (OR'ed together).
* @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
* sk with connection information (IP addresses, etc.) May not contain
* cgroup info if it is a req sock.
- * @type: The type of program to be executed
+ * @atype: The type of program to be executed
*
* socket passed is expected to be of type INET or INET6.
*
* @ppos: value-result argument: value is position at which read from or write
* to sysctl is happening, result is new position if program overrode it,
* initial value otherwise
- * @type: type of program to be executed
+ * @atype: type of program to be executed
*
* Program is run when sysctl is being accessed, either read or written, and
* can allow or deny such access.
* Typical case will be between 11K and 116K closer to 11K.
* bpf progs can and should share bpf_mem_cache when possible.
*/
-
-static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
+static void init_refill_work(struct bpf_mem_cache *c)
{
init_irq_work(&c->refill_work, bpf_mem_refill);
if (c->unit_size <= 256) {
c->high_watermark = max(96 * 256 / c->unit_size, 3);
}
c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
+}
+static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
+{
/* To avoid consuming memory assume that 1st run of bpf
* prog won't be doing more than 4 map_update_elem from
* irq disabled region
alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}
+static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+{
+ struct llist_node *first;
+ unsigned int obj_size;
+
+	/* For the per-cpu allocator, the size of free objects in the free list
+	 * doesn't match unit_size, and there is currently no way to get the size
+	 * of the per-cpu pointer saved in a free object, so just skip the check.
+ */
+ if (c->percpu_size)
+ return 0;
+
+ first = c->free_llist.first;
+ if (!first)
+ return 0;
+
+ obj_size = ksize(first);
+ if (obj_size != c->unit_size) {
+ WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
+ idx, obj_size, c->unit_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* When size != 0 bpf_mem_cache for each cpu.
* This is typical bpf hash map use case when all elements have equal size.
*
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+ int cpu, i, err, unit_size, percpu_size = 0;
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
- int cpu, i, unit_size, percpu_size = 0;
if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
c->objcg = objcg;
c->percpu_size = percpu_size;
c->tgt = c;
+ init_refill_work(c);
prefill_mem_cache(c, cpu);
}
ma->cache = pc;
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
+ err = 0;
#ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current();
#endif
c->unit_size = sizes[i];
c->objcg = objcg;
c->tgt = c;
+
+ init_refill_work(c);
+ /* Another bpf_mem_cache will be used when allocating
+			 * c->unit_size in bpf_mem_alloc(), so don't prefill
+			 * this bpf_mem_cache because these free objects will
+ * never be used.
+ */
+ if (i != bpf_mem_cache_idx(c->unit_size))
+ continue;
prefill_mem_cache(c, cpu);
+ err = check_obj_size(c, i);
+ if (err)
+ goto out;
}
}
+
+out:
ma->caches = pcc;
- return 0;
+ /* refill_work is either zeroed or initialized, so it is safe to
+ * call irq_work_sync().
+ */
+ if (err)
+ bpf_mem_alloc_destroy(ma);
+ return err;
}
static void drain_mem_cache(struct bpf_mem_cache *c)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
+
+/* Most of the logic is taken from setup_kmalloc_cache_index_table() */
+static __init int bpf_mem_cache_adjust_size(void)
+{
+ unsigned int size, index;
+
+ /* Normally KMALLOC_MIN_SIZE is 8-bytes, but it can be
+ * up-to 256-bytes.
+ */
+ size = KMALLOC_MIN_SIZE;
+ if (size <= 192)
+ index = size_index[(size - 1) / 8];
+ else
+ index = fls(size - 1) - 1;
+ for (size = 8; size < KMALLOC_MIN_SIZE && size <= 192; size += 8)
+ size_index[(size - 1) / 8] = index;
+
+ /* The minimal alignment is 64-bytes, so disable 96-bytes cache and
+ * use 128-bytes cache instead.
+ */
+ if (KMALLOC_MIN_SIZE >= 64) {
+ index = size_index[(128 - 1) / 8];
+ for (size = 64 + 8; size <= 96; size += 8)
+ size_index[(size - 1) / 8] = index;
+ }
+
+ /* The minimal alignment is 128-bytes, so disable 192-bytes cache and
+ * use 256-bytes cache instead.
+ */
+ if (KMALLOC_MIN_SIZE >= 128) {
+ index = fls(256 - 1) - 1;
+ for (size = 128 + 8; size <= 192; size += 8)
+ size_index[(size - 1) / 8] = index;
+ }
+
+ return 0;
+}
+subsys_initcall(bpf_mem_cache_adjust_size);
offload->netdev = netdev;
ondev = bpf_offload_find_netdev(offload->netdev);
+	/* When the program is offloaded, require the presence of a "true"
+	 * bpf_offload_netdev and avoid the one created for the !ondev case below.
+ */
+ if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
+ err = -EINVAL;
+ goto err_free;
+ }
if (!ondev) {
- if (bpf_prog_is_offloaded(prog->aux)) {
- err = -EINVAL;
- goto err_free;
- }
-
/* When only binding to the device, explicitly
* create an entry in the hashtable.
*/
int err = 0;
void *ptr;
- raw_spin_lock_irqsave(&qs->lock, flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, flags);
+ }
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
void *ptr;
u32 index;
- raw_spin_lock_irqsave(&qs->lock, flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, flags);
+ }
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
if (flags & BPF_NOEXIST || flags > BPF_EXIST)
return -EINVAL;
- raw_spin_lock_irqsave(&qs->lock, irq_flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, irq_flags);
+ }
if (queue_stack_map_is_full(qs)) {
if (!replace) {
#define pr_fmt(fmt) "crash hp: " fmt
/*
+ * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
+ * happen, many crash hotplug events may be notified during one short
+ * period, e.g. when one memory board is hot added and its memory regions
+ * come online. So the mutex lock __crash_hotplug_lock is used to
+ * serialize the crash hotplug handling specifically.
+ */
+DEFINE_MUTEX(__crash_hotplug_lock);
+#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
+#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)
+
+/*
* This routine utilized when the crash_hotplug sysfs node is read.
* It reflects the kernel's ability/permission to update the crash
* elfcorehdr directly.
{
int rc = 0;
+ crash_hotplug_lock();
/* Obtain lock while reading crash information */
if (!kexec_trylock()) {
pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ crash_hotplug_unlock();
return 0;
}
if (kexec_crash_image) {
}
/* Release lock now that update complete */
kexec_unlock();
+ crash_hotplug_unlock();
return rc;
}
{
struct kimage *image;
+ crash_hotplug_lock();
/* Obtain lock while changing crash information */
if (!kexec_trylock()) {
pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ crash_hotplug_unlock();
return;
}
out:
/* Release lock now that update complete */
kexec_unlock();
+ crash_hotplug_unlock();
}
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
}
mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
- default_nareas), SMP_CACHE_BYTES);
+ nareas), SMP_CACHE_BYTES);
if (!mem->areas) {
pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
return;
}
- swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
- default_nareas);
+ swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
add_mem_pool(&io_tlb_default_mem, mem);
if (flags & SWIOTLB_VERBOSE)
}
add_mem_pool(mem, pool);
-
- /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
- smp_wmb();
}
/**
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
found:
- dev->dma_uses_io_tlb = true;
- /* Pairs with smp_rmb() in is_swiotlb_buffer() */
- smp_wmb();
+ WRITE_ONCE(dev->dma_uses_io_tlb, true);
+
+ /*
+ * The general barrier orders reads and writes against a presumed store
+ * of the SWIOTLB buffer address by a device driver (to a driver private
+ * data structure). It serves two purposes.
+ *
+ * First, the store to dev->dma_uses_io_tlb must be ordered before the
+ * presumed store. This guarantees that the returned buffer address
+ * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
+ *
+ * Second, the load from mem->pools must be ordered before the same
+ * presumed store. This guarantees that the returned buffer address
+ * cannot be observed by another CPU before an update of the RCU list
+ * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
+ * atomicity).
+ *
+ * See also the comment in is_swiotlb_buffer().
+ */
+ smp_mb();
*retpool = pool;
return index;
if (!fmt) {
__warn(file, line, __builtin_return_address(0), taint,
NULL, NULL);
+ warn_rcu_exit(rcu);
return;
}
}
/**
- * pidfd_open() - Open new pid file descriptor.
+ * sys_pidfd_open() - Open new pid file descriptor.
*
* @pid: pid for which to retrieve a pidfd
* @flags: flags to pass
unlock_device_hotplug();
if (snapshot_test) {
pm_pr_dbg("Checking hibernation image\n");
- error = swsusp_check(snapshot_test);
+ error = swsusp_check(false);
if (!error)
- error = load_image_and_restore(snapshot_test);
+ error = load_image_and_restore(false);
}
thaw_processes();
pm_pr_dbg("Looking for hibernation image.\n");
mutex_lock(&system_transition_mutex);
- error = swsusp_check(false);
+ error = swsusp_check(true);
if (error)
goto Unlock;
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(false);
+ swsusp_close(true);
goto Unlock;
}
goto Close_Finish;
}
- error = load_image_and_restore(false);
+ error = load_image_and_restore(true);
thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(false);
+ swsusp_close(true);
goto Finish;
}
#define SF_HW_SIG 8
/* kernel/power/hibernate.c */
-int swsusp_check(bool snapshot_test);
+int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
-void swsusp_close(bool snapshot_test);
+void swsusp_close(bool exclusive);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif
static void *swsusp_holder;
/**
- * swsusp_check - Check for swsusp signature in the resume device
+ * swsusp_check - Check for swsusp signature in the resume device
+ * @exclusive: Open the resume device exclusively.
*/
-int swsusp_check(bool snapshot_test)
+int swsusp_check(bool exclusive)
{
- void *holder = snapshot_test ? &swsusp_holder : NULL;
+ void *holder = exclusive ? &swsusp_holder : NULL;
int error;
hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
}
/**
- * swsusp_close - close swap device.
+ * swsusp_close - close swap device.
+ * @exclusive: Close the resume device which is exclusively opened.
*/
-void swsusp_close(bool snapshot_test)
+void swsusp_close(bool exclusive)
{
if (IS_ERR(hib_resume_bdev)) {
pr_debug("Image device not initialised\n");
return;
}
- blkdev_put(hib_resume_bdev, snapshot_test ? &swsusp_holder : NULL);
+ blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
}
/**
* PF_KTHREAD should already be set at this point; regardless, make it
* look like a proper per-CPU kthread.
*/
- idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
+ idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
kthread_set_per_cpu(idle, cpu);
#ifdef CONFIG_SMP
if (lowest_mask) {
cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
+ cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
/*
* We have to ensure that we have at least one bit
/* Working cpumask for: load_balance, load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
+static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
#ifdef CONFIG_NO_HZ_COMMON
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
break;
case group_smt_balance:
+ /*
+		 * Check if we have spare CPUs on either SMT group to
+		 * choose between has-spare and fully-busy handling.
+ */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
+ goto has_spare;
+
+ fallthrough;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
else
return true;
}
+has_spare:
/*
* Select not overloaded group with lowest number of idle cpus
static int should_we_balance(struct lb_env *env)
{
+ struct cpumask *swb_cpus = this_cpu_cpumask_var_ptr(should_we_balance_tmpmask);
struct sched_group *sg = env->sd->groups;
int cpu, idle_smt = -1;
return 1;
}
+ cpumask_copy(swb_cpus, group_balance_mask(sg));
/* Try to find first idle CPU */
- for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
+ for_each_cpu_and(cpu, swb_cpus, env->cpus) {
if (!idle_cpu(cpu))
continue;
if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
if (idle_smt == -1)
idle_smt = cpu;
+ /*
+		 * If the core is not idle, and the first idle SMT sibling has
+		 * been found, then there is no need to check the other SMT
+		 * siblings for idleness:
+ */
+#ifdef CONFIG_SCHED_SMT
+ cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
+#endif
continue;
}
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
+ zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),
+ GFP_KERNEL, cpu_to_node(i));
#ifdef CONFIG_CFS_BANDWIDTH
INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
void cpu_startup_entry(enum cpuhp_state state)
{
+ current->flags |= PF_IDLE;
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
while (1)
* task_work_cancel_match - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @match: match function to call
+ * @data: data to be passed in to match function
*
* RETURNS:
* The found work or NULL if not found.
return arr.mods_cnt;
}
+static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ if (!within_error_injection_list(addrs[i]))
+ return -EINVAL;
+ }
+ return 0;
+}
+
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct bpf_kprobe_multi_link *link = NULL;
goto error;
}
+ if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
+ err = -EINVAL;
+ goto error;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
err = -ENOMEM;
rcu_read_lock();
task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
rcu_read_unlock();
- if (!task)
+ if (!task) {
+ err = -ESRCH;
goto error_path_put;
+ }
}
err = -ENOMEM;
local_set(&bpage->commit, 0);
}
+static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
+{
+ return local_read(&bpage->page->commit);
+}
+
static void free_buffer_page(struct buffer_page *bpage)
{
free_page((unsigned long)bpage->page);
if (full) {
poll_wait(filp, &work->full_waiters, poll_table);
work->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
} else {
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
* Increment overrun to account for the lost events.
*/
local_add(page_entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
}
err = -ENOMEM;
goto out_err;
}
+
+ cond_resched();
}
cpus_read_lock();
cpu_buffer->reader_page->read);
}
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
-{
- return local_read(&bpage->page->commit);
-}
-
static struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
*/
commit = rb_page_commit(iter_head_page);
smp_rmb();
+
+ /* An event needs to be at least 8 bytes in size */
+ if (iter->head > commit - 8)
+ goto reset;
+
event = __rb_page_index(iter_head_page, iter->head);
length = rb_event_length(event);
* the counters.
*/
local_add(entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
/*
event = __rb_page_index(tail_page, tail);
- /* account for padding bytes */
- local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
-
/*
* Save the original length to the meta data.
* This will be used by the reader to add lost event
* write counter enough to allow another writer to slip
* in on this page.
* We put in a discarded commit instead, to make sure
- * that this space is not used again.
+ * that this space is not used again, and this space will
+ * not be accounted into 'entries_bytes'.
*
* If we are less than the minimum size, we don't need to
* worry about it.
/* time delta must be non zero */
event->time_delta = 1;
+ /* account for padding bytes */
+ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
/* Make sure the padding is visible before the tail_page->write update */
smp_wmb();
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
length = rb_event_length(event);
cpu_buffer->reader_page->read += length;
+ cpu_buffer->read_bytes += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
} else {
/* update the entry counter */
cpu_buffer->read += rb_page_entries(reader);
- cpu_buffer->read_bytes += BUF_PAGE_SIZE;
+ cpu_buffer->read_bytes += rb_page_commit(reader);
/* swap the pages */
rb_init_page(bpage);
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
tr->d_max_latency = trace_create_file("tracing_max_latency",
TRACE_MODE_WRITE,
- d_tracer, &tr->max_latency,
+ d_tracer, tr,
&tracing_max_lat_fops);
}
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
- d_tracer, &tr->max_latency, &tracing_max_lat_fops)
+ d_tracer, tr, &tracing_max_lat_fops)
#endif
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated to it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+
+ return 0;
+}
+
static int tracing_mark_open(struct inode *inode, struct file *filp)
{
stream_open(inode, filp);
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
}
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
return cnt;
}
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
tr, &tracing_mark_fops);
file = __find_event_file(tr, "ftrace", "print");
- if (file && file->dir)
- trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
+ if (file && file->ef)
+ eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
file, &event_trigger_fops);
tr->trace_marker_file = file;
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_open_file_tr(struct inode *inode, struct file *filp);
+int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
static void remove_event_file_dir(struct trace_event_file *file)
{
- struct dentry *dir = file->dir;
- struct dentry *child;
-
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (d_really_is_positive(child)) /* probably unneeded */
- d_inode(child)->i_private = NULL;
- }
- spin_unlock(&dir->d_lock);
-
- tracefs_remove(dir);
- }
eventfs_remove(file->ef);
list_del(&file->list);
remove_subsystem(file->system);
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_enable_read,
.write = event_enable_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
};
static const struct file_operations ftrace_event_filter_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_filter_read,
.write = event_filter_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
{
struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
+ struct eventfs_file *ef;
int res;
/* First see if we did not already create this dir */
} else
__get_system(system);
- dir->ef = eventfs_add_subsystem_dir(name, parent);
- if (IS_ERR(dir->ef)) {
+ ef = eventfs_add_subsystem_dir(name, parent);
+ if (IS_ERR(ef)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
}
+ dir->ef = ef;
dir->tr = tr;
dir->ref_count = 1;
dir->nr_events = 1;
struct trace_event_call *call = file->event_call;
struct eventfs_file *ef_subsystem = NULL;
struct trace_array *tr = file->tr;
+ struct eventfs_file *ef;
const char *name;
int ret;
return -ENOMEM;
name = trace_event_name(call);
- file->ef = eventfs_add_dir(name, ef_subsystem);
- if (IS_ERR(file->ef)) {
+ ef = eventfs_add_dir(name, ef_subsystem);
+ if (IS_ERR(ef)) {
pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
+ file->ef = ef;
+
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
&ftrace_enable_fops);
update_event_fields(call, map[i]);
}
}
+ cond_resched();
}
up_write(&trace_event_sem);
}
}
const struct file_operations event_inject_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_inject_read,
.write = event_inject_write,
+ .release = tracing_release_file_tr,
};
break;
default:
- trace_seq_printf(s, print_fmt, name, val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u64, space);
break;
}
}
/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7
-/* Only duplicate the bit value */
-#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
+/* Bit 8 is for marking 32-bit on 64-bit */
+#define ENABLE_VAL_32_ON_64_BIT 8
+
+#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
+
+/* Only duplicate the bit and compat values */
+#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
#define ENABLE_BITOPS(e) (&(e)->values)
int flags;
};
+static inline void align_addr_bit(unsigned long *addr, int *bit,
+ unsigned long *flags)
+{
+ if (IS_ALIGNED(*addr, sizeof(long))) {
+#ifdef __BIG_ENDIAN
+ /* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
+ if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
+ *bit += 32;
+#endif
+ return;
+ }
+
+ *addr = ALIGN_DOWN(*addr, sizeof(long));
+
+ /*
+ * We only support 32 and 64 bit values. The only time we need
+ * to align is a 32 bit value on a 64 bit kernel, which on LE
+ * is always 32 bits, and on BE requires no change when unaligned.
+ */
+#ifdef __LITTLE_ENDIAN
+ *bit += 32;
+#endif
+}
+
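A minimal user-space sketch, not part of the patch, of the little-endian adjustment that align_addr_bit() performs above: when a 32-bit enabler word is not long-aligned on a 64-bit kernel, the address is aligned down and the bit index is moved into the upper half of the long. The helper name and sample addresses below are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the LE path of align_addr_bit(): align the address down to a
 * long boundary and shift the bit into the upper 32 bits of that long. */
static void demo_align(unsigned long addr, int bit)
{
	unsigned long aligned = addr;
	int adjusted = bit;

	if (aligned % sizeof(long)) {
		aligned &= ~(sizeof(long) - 1);
		adjusted += 32;
	}

	printf("addr 0x%lx bit %d -> addr 0x%lx bit %d\n",
	       addr, bit, aligned, adjusted);
}

int main(void)
{
	demo_align(0x1000, 3);	/* already long-aligned: unchanged on LE */
	demo_align(0x1004, 3);	/* unaligned 32-bit word: bit becomes 35 */
	return 0;
}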
typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
void *tpdata, bool *faulted);
unsigned long *ptr;
struct page *page;
void *kaddr;
+ int bit = ENABLE_BIT(enabler);
int ret;
lockdep_assert_held(&event_mutex);
test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
return -EBUSY;
+ align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
+
ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
&page, NULL);
/* Update bit atomically, user tracers must be atomic as well */
if (enabler->event && enabler->event->status)
- set_bit(ENABLE_BIT(enabler), ptr);
+ set_bit(bit, ptr);
else
- clear_bit(ENABLE_BIT(enabler), ptr);
+ clear_bit(bit, ptr);
kunmap_local(kaddr);
unpin_user_pages_dirty_lock(&page, 1, true);
enabler->event = user;
enabler->addr = uaddr;
enabler->values = reg->enable_bit;
+
+#if BITS_PER_LONG >= 64
+ if (reg->enable_size == 4)
+ set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
+#endif
+
retry:
/* Prevents state changes from racing with new enablers */
mutex_lock(&event_mutex);
}
static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
- unsigned long uaddr, unsigned char bit)
+ unsigned long uaddr, unsigned char bit,
+ unsigned long flags)
{
struct user_event_enabler enabler;
int result;
memset(&enabler, 0, sizeof(enabler));
enabler.addr = uaddr;
- enabler.values = bit;
+ enabler.values = bit | flags;
retry:
/* Prevents state changes from racing with new enablers */
mutex_lock(&event_mutex);
struct user_event_mm *mm = current->user_event_mm;
struct user_event_enabler *enabler, *next;
struct user_unreg reg;
+ unsigned long flags;
long ret;
ret = user_unreg_get(ureg, ®);
if (!mm)
return -ENOENT;
+ flags = 0;
ret = -ENOENT;
/*
ENABLE_BIT(enabler) == reg.disable_bit) {
set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
+ /* We must keep compat flags for the clear */
+ flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
+
if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
user_event_enabler_destroy(enabler, true);
/* Ensure bit is now cleared for user, regardless of event status */
if (!ret)
ret = user_event_mm_clear_bit(mm, reg.disable_addr,
- reg.disable_bit);
+ reg.disable_bit, flags);
return ret;
}
BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
- wq_update_pod_attrs_buf = alloc_workqueue_attrs();
- BUG_ON(!wq_update_pod_attrs_buf);
-
pt->nr_pods = 1;
cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
pt->pod_node[0] = NUMA_NO_NODE;
unsigned long thresh;
unsigned long bogo;
+ pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+ BUG_ON(IS_ERR(pwq_release_worker));
+
/* if the user set it to a specific value, keep it */
if (wq_cpu_intensive_thresh_us != ULONG_MAX)
return;
- pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
- BUG_ON(IS_ERR(pwq_release_worker));
-
/*
* The default of 10ms is derived from the fact that most modern (as of
* 2023) processors can do a lot in 10ms and that it's just below what
/**
* argv_free - free an argv
- * @argv - the argument vector to be freed
+ * @argv: the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
* @str: the string to be split
* @argcp: returned argument count
*
- * Returns an array of pointers to strings which are split out from
+ * Returns: an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
-static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
+static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
if (!period) {
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
- return;
+ return 0;
}
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ if (!parsed->test_glob) {
+ kfree(parsed->suite_glob);
+ return -ENOMEM;
+ }
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+
+ return 0;
}
/* Create a copy of suite with only tests that match test_glob. */
}
copy_start = copy;
- if (filter_glob)
- kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (filter_glob) {
+ *err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (*err)
+ goto free_copy;
+ }
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
if (!parsed_filters) {
- kfree(copy);
- return filtered;
+ *err = -ENOMEM;
+ goto free_parsed_glob;
}
for (j = 0; j < filter_count; j++)
parsed_filters[j] = kunit_next_attr_filter(&filters, err);
if (*err)
- goto err;
+ goto free_parsed_filters;
}
for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
parsed_glob.test_glob);
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
}
if (filter_count > 0 && parsed_filters != NULL) {
filtered_suite = new_filtered_suite;
if (*err)
- goto err;
+ goto free_parsed_filters;
+
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
if (!filtered_suite)
break;
filtered.start = copy_start;
filtered.end = copy;
-err:
- if (*err)
- kfree(copy);
+free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+free_parsed_glob:
if (filter_glob) {
kfree(parsed_glob.suite_glob);
kfree(parsed_glob.test_glob);
}
- if (filter_count)
- kfree(parsed_filters);
+free_copy:
+ if (*err)
+ kfree(copy);
return filtered;
}
{
int j, filter_count;
struct kunit_attr_filter *parsed_filters;
- char *filters = "speed>slow, module!=example";
+ char filters[] = "speed>slow, module!=example", *filter = filters;
int err = 0;
filter_count = kunit_get_filter_count(filters);
parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
- parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
}
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
* attribute is unset and thus, the filtering is based on the parent attribute
* of slow.
*/
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "module!=dummy";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
- got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
.start = subsuite, .end = &subsuite[1],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
/* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
switch (val) {
case MODULE_STATE_LIVE:
- kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
case MODULE_STATE_UNFORMED:
break;
}
return xa_is_err(mas->node);
}
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_OVERFLOW))
+ return true;
+
+ return false;
+}
+
+static __always_inline bool mas_is_underflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_UNDERFLOW))
+ return true;
+
+ return false;
+}
+
static inline bool mas_searchable(struct ma_state *mas)
{
if (mas_is_none(mas))
*
* @mas: The maple state
* @max: The minimum starting range
+ * @empty: Can be empty
+ * @set_underflow: Set @mas->node to the underflow state when the limit is hit.
*
* Return: The entry in the previous slot which is possibly NULL
*/
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
+ bool set_underflow)
{
void *entry;
void __rcu **slots;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
-again:
if (mas->min <= min) {
pivot = mas_safe_min(mas, pivots, mas->offset);
goto retry;
if (pivot <= min)
- return NULL;
+ goto underflow;
}
+again:
if (likely(mas->offset)) {
mas->offset--;
mas->last = mas->index - 1;
}
if (mas_is_none(mas))
- return NULL;
+ goto underflow;
mas->last = mas->max;
node = mas_mn(mas);
if (likely(entry))
return entry;
- if (!empty)
+ if (!empty) {
+ if (mas->index <= min)
+ goto underflow;
+
goto again;
+ }
return entry;
+
+underflow:
+ if (set_underflow)
+ mas->node = MAS_UNDERFLOW;
+ return NULL;
}
/*
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
+ * @set_overflow: Should @mas->node be set to overflow when the limit is
+ * reached.
*
* Return: The entry in the next slot which is possibly NULL
*/
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
+ bool set_overflow)
{
void __rcu **slots;
unsigned long *pivots;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
-again:
if (mas->max >= max) {
if (likely(mas->offset < data_end))
pivot = pivots[mas->offset];
else
- return NULL; /* must be mas->max */
+ goto overflow;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot >= max)
- return NULL;
+ goto overflow;
}
if (likely(mas->offset < data_end)) {
mas->index = pivots[mas->offset] + 1;
+again:
mas->offset++;
if (likely(mas->offset < data_end))
mas->last = pivots[mas->offset];
goto retry;
}
- if (mas_is_none(mas))
+ if (WARN_ON_ONCE(mas_is_none(mas))) {
+ mas->node = MAS_OVERFLOW;
return NULL;
+ goto overflow;
+ }
mas->offset = 0;
mas->index = mas->min;
return entry;
if (!empty) {
- if (!mas->offset)
- data_end = 2;
+ if (mas->last >= max)
+ goto overflow;
+
+ mas->index = mas->last + 1;
+ /* Node cannot end on NULL, so it's safe to short-cut here */
goto again;
}
return entry;
+
+overflow:
+ if (set_overflow)
+ mas->node = MAS_OVERFLOW;
+ return NULL;
}
/*
*
* Set the @mas->node to the next entry and the range_start to
* the beginning value for the entry. Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the limit if it is hit.
+ * Sets @mas->index and @mas->last to the range. Does not update @mas->index and
+ * @mas->last on overflow.
* Restarts on dead nodes.
*
* Return: the next entry or %NULL.
*/
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
- if (mas->last >= limit)
+ if (mas->last >= limit) {
+ mas->node = MAS_OVERFLOW;
return NULL;
+ }
- return mas_next_slot(mas, limit, false);
+ return mas_next_slot(mas, limit, false, true);
}
/*
{
void *entry;
- if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
+ if (!mas_is_active(mas) || !mas_is_start(mas))
mas->node = MAS_START;
retry:
entry = mas_state_walk(mas);
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
- if (mas_is_start(wr_mas->mas))
- return;
+ if (!mas_is_active(wr_mas->mas)) {
+ if (mas_is_start(wr_mas->mas))
+ return;
- if (unlikely(mas_is_paused(wr_mas->mas)))
- goto reset;
+ if (unlikely(mas_is_paused(wr_mas->mas)))
+ goto reset;
- if (unlikely(mas_is_none(wr_mas->mas)))
- goto reset;
+ if (unlikely(mas_is_none(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(wr_mas->mas)))
+ goto reset;
+ }
/*
* A less strict version of mas_is_span_wr() where we allow spanning
{
bool was_none = mas_is_none(mas);
- if (mas_is_none(mas) || mas_is_paused(mas))
+ if (unlikely(mas->last >= max)) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_overflow(mas)) {
+ /* Overflowed before, but the max changed */
mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
if (mas_is_start(mas))
*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
if (mas_is_none(mas))
return true;
+
return false;
}
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false);
+ return mas_next_slot(mas, max, false, true);
}
EXPORT_SYMBOL_GPL(mas_next);
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true);
+ return mas_next_slot(mas, max, true, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- if (mas->index <= min)
- goto none;
+ if (unlikely(mas->index <= min)) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
- if (mas_is_none(mas) || mas_is_paused(mas))
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_overflow(mas)) {
mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
- if (mas_is_start(mas)) {
- mas_walk(mas);
- if (!mas->index)
- goto none;
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ /* Underflowed before, but the min changed */
+ mas->node = MAS_START;
}
+ if (mas_is_start(mas))
+ mas_walk(mas);
+
if (unlikely(mas_is_ptr(mas))) {
if (!mas->index)
goto none;
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, false);
+ return mas_prev_slot(mas, min, false, true);
}
EXPORT_SYMBOL_GPL(mas_prev);
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, true);
+ return mas_prev_slot(mas, min, true, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
- *entry = NULL;
+ if (mas_is_active(mas)) {
+ if (mas->last < max)
+ return false;
- if (unlikely(mas_is_none(mas))) {
+ return true;
+ }
+
+ if (mas_is_paused(mas)) {
if (unlikely(mas->last >= max))
return true;
- mas->index = mas->last;
+ mas->index = ++mas->last;
mas->node = MAS_START;
- } else if (unlikely(mas_is_paused(mas))) {
+ } else if (mas_is_none(mas)) {
if (unlikely(mas->last >= max))
return true;
+ mas->index = mas->last;
mas->node = MAS_START;
- mas->index = ++mas->last;
- } else if (unlikely(mas_is_ptr(mas)))
- goto ptr_out_of_range;
+ } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
+ if (mas->index > max) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
+ }
- if (unlikely(mas_is_start(mas))) {
+ if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index > max)
return true;
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false);
+ return mas_next_slot(mas, max, false, false);
}
EXPORT_SYMBOL_GPL(mas_find);
*/
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true);
+ return mas_next_slot(mas, max, true, false);
}
EXPORT_SYMBOL_GPL(mas_find_range);
static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- *entry = NULL;
-
- if (unlikely(mas_is_none(mas))) {
- if (mas->index <= min)
- goto none;
+ if (mas_is_active(mas)) {
+ if (mas->index > min)
+ return false;
- mas->last = mas->index;
- mas->node = MAS_START;
+ return true;
}
- if (unlikely(mas_is_paused(mas))) {
+ if (mas_is_paused(mas)) {
if (unlikely(mas->index <= min)) {
mas->node = MAS_NONE;
return true;
}
mas->node = MAS_START;
mas->last = --mas->index;
+ } else if (mas_is_none(mas)) {
+ if (mas->index <= min)
+ goto none;
+
+ mas->last = mas->index;
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
+ if (mas->last <= min) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
}
- if (unlikely(mas_is_start(mas))) {
+ if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index < min)
return true;
*/
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, false);
+ return mas_prev_slot(mas, min, false, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
*/
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, true);
+ return mas_prev_slot(mas, min, true, false);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
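A hedged in-kernel usage sketch of the new overflow/underflow behaviour introduced above; the wrapper function name is invented and the sketch only illustrates the intent, it is not taken from the patch. Once a forward walk runs past the last stored range, the state parks in MAS_OVERFLOW, and a later mas_prev() resumes from the last range instead of rewalking from the root.

#include <linux/kernel.h>
#include <linux/maple_tree.h>

static void walk_to_overflow(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	mas_lock(&mas);
	/* Drain the tree; after the final entry the state becomes
	 * MAS_OVERFLOW and further mas_next() calls keep returning NULL. */
	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
		;

	/* Stepping backwards from MAS_OVERFLOW picks up the last range. */
	entry = mas_prev(&mas, 0);
	mas_unlock(&mas);
}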
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
- * @nents_first_chunk: Number of entries int the (preallocated) first
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
* scatterlist chunk, 0 means no such preallocated chunk provided by user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
+ * @flags: sg iterator flags
*
* Description:
* Starts mapping iterator @miter.
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
mas.index = 0;
mas.last = 5;
* exists MAS_NONE active range
* exists active active range
* DNE active active set to last range
+ * ERANGE active MAS_OVERFLOW last range
*
* Function ENTRY Start Result index & last
* mas_prev()
* any MAS_ROOT MAS_NONE 0
* exists active active range
* DNE active active last range
+ * ERANGE active MAS_UNDERFLOW last range
*
* Function ENTRY Start Result index & last
* mas_find()
* DNE MAS_START MAS_NONE 0
* DNE MAS_PAUSE MAS_NONE 0
* DNE MAS_ROOT MAS_NONE 0
- * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 1
* if index == 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
* DNE MAS_START active set to max
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to max
- * exists MAS_NONE active range
+ * exists MAS_NONE active range (start at last)
* exists active active range
* DNE active active last range (max < last)
*
* DNE MAS_START active set to min
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to min
- * exists MAS_NONE active range
+ * exists MAS_NONE active range (start at index)
* exists active active range
* DNE active active last range (min > index)
*
mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
mas_lock(&mas);
- /* prev: Start -> none */
+ /* prev: Start -> underflow*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
/* prev: Start -> root */
mas_set(&mas, 10);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.node != MAS_NONE);
- /* next: start -> none */
+ /* next: start -> none*/
mas_set(&mas, 10);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
- /* next:active -> active out of range*/
+ /* next:active -> active beyond data */
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
MT_BUG_ON(mt, !mas_active(mas));
- /* Continue after out of range*/
+ /* Continue after last range ends after max */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
- /* next:active -> active out of range*/
+ /* next:active -> active continued */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> overflow */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* next:overflow -> overflow */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* prev:overflow -> active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
/* next: none -> active, skip value at location */
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev:active -> active out of range*/
+ /* prev:active -> active spanning end range */
+ entry = mas_prev(&mas, 0x0100);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* next:underflow -> active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:first value -> underflow */
+ entry = mas_prev(&mas, 0x1000);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* find:underflow -> first value */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* prev: pause ->active */
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev:active -> active out of range*/
+ /* prev:active -> active spanning min */
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev: active ->active, continue*/
+ /* prev: active ->active, continue */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x2FFF);
MT_BUG_ON(mt, !mas_active(mas));
- /* find: none ->active */
+ /* find: overflow ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
check_empty_area_fill(&tree);
mtree_destroy(&tree);
-
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_state_handling(&tree);
mtree_destroy(&tree);
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
+
+ damon_destroy_target(t);
}
/*
bool referenced = false;
pte_t entry = huge_ptep_get(pte);
struct folio *folio = pfn_folio(pte_pfn(entry));
+ unsigned long psize = huge_page_size(hstate_vma(vma));
folio_get(folio);
if (pte_young(entry)) {
referenced = true;
entry = pte_mkold(entry);
- set_huge_pte_at(mm, addr, pte, entry);
+ set_huge_pte_at(mm, addr, pte, entry, psize);
}
#ifdef CONFIG_MMU_NOTIFIER
*/
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
- unsigned long addr, unsigned int nr_pages)
+ unsigned long addr, unsigned int nr_pages,
+ unsigned int *mmap_miss)
{
vm_fault_t ret = 0;
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
struct page *page = folio_page(folio, start);
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
unsigned int count = 0;
pte_t *old_ptep = vmf->pte;
if (PageHWPoison(page + count))
goto skip;
- if (mmap_miss > 0)
- mmap_miss--;
+ (*mmap_miss)++;
/*
* NOTE: If there're PTE markers, we'll leave them to be
if (count) {
set_pte_range(vmf, folio, page, count, addr);
folio_ref_add(folio, count);
- if (in_range(vmf->address, addr, count))
+ if (in_range(vmf->address, addr, count * PAGE_SIZE))
ret = VM_FAULT_NOPAGE;
}
if (count) {
set_pte_range(vmf, folio, page, count, addr);
folio_ref_add(folio, count);
- if (in_range(vmf->address, addr, count))
+ if (in_range(vmf->address, addr, count * PAGE_SIZE))
ret = VM_FAULT_NOPAGE;
}
vmf->pte = old_ptep;
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+ return ret;
+}
+
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+ struct folio *folio, unsigned long addr,
+ unsigned int *mmap_miss)
+{
+ vm_fault_t ret = 0;
+ struct page *page = &folio->page;
+
+ if (PageHWPoison(page))
+ return ret;
+
+ (*mmap_miss)++;
+
+ /*
+ * NOTE: If there're PTE markers, we'll leave them to be
+ * handled in the specific fault path, and it'll prohibit
+ * the fault-around logic.
+ */
+ if (!pte_none(ptep_get(vmf->pte)))
+ return ret;
+
+ if (vmf->address == addr)
+ ret = VM_FAULT_NOPAGE;
+
+ set_pte_range(vmf, folio, page, 1, addr);
+ folio_ref_inc(folio);
return ret;
}
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
vm_fault_t ret = 0;
- int nr_pages = 0;
+ unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
end = folio->index + folio_nr_pages(folio) - 1;
nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
- /*
- * NOTE: If there're PTE markers, we'll leave them to be
- * handled in the specific fault path, and it'll prohibit the
- * fault-around logic.
- */
- if (!pte_none(ptep_get(vmf->pte)))
- goto unlock;
-
- ret |= filemap_map_folio_range(vmf, folio,
- xas.xa_index - folio->index, addr, nr_pages);
+ if (!folio_test_large(folio))
+ ret |= filemap_map_order0_folio(vmf,
+ folio, addr, &mmap_miss);
+ else
+ ret |= filemap_map_folio_range(vmf, folio,
+ xas.xa_index - folio->index, addr,
+ nr_pages, &mmap_miss);
-unlock:
folio_unlock(folio);
folio_put(folio);
- folio = next_uptodate_folio(&xas, mapping, end_pgoff);
- } while (folio);
+ } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
+
+ mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+ if (mmap_miss >= mmap_miss_saved)
+ WRITE_ONCE(file->f_ra.mmap_miss, 0);
+ else
+ WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
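A small stand-alone sketch, with assumed names and outside kernel context, of the mmap_miss bookkeeping above: misses are accumulated locally during the walk and folded into the stored counter once, saturating at zero.

#include <stdio.h>

/* Fold the locally accumulated count into the saved counter, clamping at 0,
 * like the single WRITE_ONCE() at the end of filemap_map_pages(). */
static unsigned int fold_mmap_miss(unsigned int saved, unsigned int mapped)
{
	return mapped >= saved ? 0 : saved - mapped;
}

int main(void)
{
	printf("%u\n", fold_mmap_miss(10, 3));	/* 7 */
	printf("%u\n", fold_mmap_miss(2, 5));	/* clamps to 0 */
	return 0;
}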
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
- struct folio *new_folio, pte_t old)
+ struct folio *new_folio, pte_t old, unsigned long sz)
{
pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
hugepage_add_new_anon_rmap(new_folio, vma, addr);
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
newpte = huge_pte_mkuffd_wp(newpte);
- set_huge_pte_at(vma->vm_mm, addr, ptep, newpte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
folio_set_hugetlb_migratable(new_folio);
}
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(is_hugetlb_entry_migration(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
bool uffd_wp = pte_swp_uffd_wp(entry);
entry = swp_entry_to_pte(swp_entry);
if (userfaultfd_wp(src_vma) && uffd_wp)
entry = pte_swp_mkuffd_wp(entry);
- set_huge_pte_at(src, addr, src_pte, entry);
+ set_huge_pte_at(src, addr, src_pte, entry, sz);
}
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(is_pte_marker(entry))) {
pte_marker marker = copy_pte_marker(
pte_to_swp_entry(entry), dst_vma);
if (marker)
set_huge_pte_at(dst, addr, dst_pte,
- make_pte_marker(marker));
+ make_pte_marker(marker), sz);
} else {
entry = huge_ptep_get(src_pte);
pte_folio = page_folio(pte_page(entry));
goto again;
}
hugetlb_install_folio(dst_vma, dst_pte, addr,
- new_folio, src_pte_old);
+ new_folio, src_pte_old, sz);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
continue;
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
hugetlb_count_add(npages, dst);
}
spin_unlock(src_ptl);
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
+ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
+ unsigned long sz)
{
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
- set_huge_pte_at(mm, new_addr, dst_pte, pte);
+ set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
if (src_ptl != dst_ptl)
spin_unlock(src_ptl);
if (!dst_pte)
break;
- move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
+ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
}
if (shared_pmd)
if (pte_swp_uffd_wp_any(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ sz);
else
huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
if (huge_pte_uffd_wp(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, vma, true);
hugepage_add_new_anon_rmap(new_folio, vma, haddr);
if (huge_pte_uffd_wp(pte))
newpte = huge_pte_mkuffd_wp(newpte);
- set_huge_pte_at(mm, haddr, ptep, newpte);
+ set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
folio_set_hugetlb_migratable(new_folio);
/* Make the old page be freed below */
new_folio = old_folio;
*/
if (unlikely(pte_marker_uffd_wp(old_pte)))
new_pte = huge_pte_mkuffd_wp(new_pte);
- set_huge_pte_at(mm, haddr, ptep, new_pte);
+ set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
}
_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
- set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+ set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
+ huge_page_size(h));
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
if (wp_enabled)
_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
- set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+ set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
else if (uffd_wp_resolve)
newpte = pte_swp_clear_uffd_wp(newpte);
if (!pte_same(pte, newpte))
- set_huge_pte_at(mm, address, ptep, newpte);
+ set_huge_pte_at(mm, address, ptep, newpte, psize);
} else if (unlikely(is_pte_marker(pte))) {
/* No other markers apply for now. */
WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
if (unlikely(uffd_wp))
/* Safe to modify directly (none->non-present). */
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ psize);
}
spin_unlock(ptl);
}
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
}
#endif
+#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
-#ifdef __HAVE_ARCH_SHADOW_MAP
- return (kasan_mem_to_shadow((void *)addr) != NULL);
-#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
-#endif
}
+#endif
/**
* kasan_check_range - Check memory region, and report if invalid access.
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
*/
-void mem_cgroup_handle_over_high(void)
+void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
unsigned long penalty_jiffies;
unsigned long pflags;
*/
nr_reclaimed = reclaim_high(memcg,
in_retry ? SWAP_CLUSTER_MAX : nr_pages,
- GFP_KERNEL);
+ gfp_mask);
/*
* memory.high is breached and reclaim is unable to keep up. Throttle
if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
!(current->flags & PF_MEMALLOC) &&
gfpflags_allow_blocking(gfp_mask)) {
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(gfp_mask);
}
return 0;
}
case _MEMSWAP:
ret = mem_cgroup_resize_max(memcg, nr_pages, true);
break;
+ case _KMEM:
+ pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
+ "Writing any value to this file has no effect. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
+ ret = 0;
+ break;
case _TCP:
ret = memcg_update_tcp_max(memcg, nr_pages);
break;
},
#endif
{
+ .name = "kmem.limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+ .write = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ {
.name = "kmem.usage_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
.read_u64 = mem_cgroup_read_u64,
unsigned long start;
unsigned long end;
struct vm_area_struct *first;
+ bool has_unmovable;
};
/*
/*
* queue_folios_pmd() has three possible return values:
* 0 - folios are placed on the right node or queued successfully, or
- * special page is met, i.e. huge zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- * specified.
+ * special page is met, i.e. zero page, or an unmovable page is found
+ * but the walk continues (indicated by queue_pages.has_unmovable).
* -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
* existing folio was already on a node that does not follow the
* policy.
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
if (!vma_migratable(walk->vma) ||
migrate_folio_add(folio, qp->pagelist, flags)) {
- ret = 1;
+ qp->has_unmovable = true;
goto unlock;
}
} else
*
* queue_folios_pte_range() has three possible return values:
* 0 - folios are placed on the right node or queued successfully, or
- * special page is met, i.e. zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- * specified.
+ * special page is met, i.e. zero page, or an unmovable page is found
+ * but the walk continues (indicated by queue_pages.has_unmovable).
* -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
* on a node that does not follow the policy.
*/
struct folio *folio;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
- bool has_unmovable = false;
pte_t *pte, *mapped_pte;
pte_t ptent;
spinlock_t *ptl;
if (!queue_folio_required(folio, qp))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- /* MPOL_MF_STRICT must be specified if we get here */
- if (!vma_migratable(vma)) {
- has_unmovable = true;
- break;
- }
+ /*
+ * MPOL_MF_STRICT must be specified if we get here.
+ * Continue walking vmas due to MPOL_MF_MOVE* flags.
+ */
+ if (!vma_migratable(vma))
+ qp->has_unmovable = true;
/*
* Do not abort immediately since there may be
* need migrate other LRU pages.
*/
if (migrate_folio_add(folio, qp->pagelist, flags))
- has_unmovable = true;
+ qp->has_unmovable = true;
} else
break;
}
pte_unmap_unlock(mapped_pte, ptl);
cond_resched();
- if (has_unmovable)
- return 1;
-
return addr != end ? -EIO : 0;
}
* Detecting misplaced folio but allow migrating folios which
* have been queued.
*/
- ret = 1;
+ qp->has_unmovable = true;
goto unlock;
}
* Failed to isolate folio but allow migrating pages
* which have been queued.
*/
- ret = 1;
+ qp->has_unmovable = true;
}
unlock:
spin_unlock(ptl);
.start = start,
.end = end,
.first = NULL,
+ .has_unmovable = false,
};
const struct mm_walk_ops *ops = lock_vma ?
&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
err = walk_page_range(mm, start, end, ops, &qp);
+ if (qp.has_unmovable)
+ err = 1;
if (!qp.first)
/* whole range in hole */
err = -EFAULT;
putback_movable_pages(&pagelist);
}
- if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+ if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
err = -EIO;
} else {
up_out:
#ifdef CONFIG_HUGETLB_PAGE
if (folio_test_hugetlb(folio)) {
- unsigned int shift = huge_page_shift(hstate_vma(vma));
+ struct hstate *h = hstate_vma(vma);
+ unsigned int shift = huge_page_shift(h);
+ unsigned long psize = huge_page_size(h);
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio))
rmap_flags);
else
page_dup_file_rmap(new, true);
- set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
+ set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
+ psize);
} else
#endif
{
}
vma_iter_init(&vmi, mm, old_addr);
- if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
+ if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
- int migratetype;
+ int migratetype, pcpmigratetype;
if (!free_unref_page_prepare(page, pfn, order))
return;
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
- * offlined but treat HIGHATOMIC as movable pages so we can get those
- * areas back if necessary. Otherwise, we may have to free
+ * offlined but treat HIGHATOMIC and CMA as movable pages so we can
+ * get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
- migratetype = get_pcppage_migratetype(page);
+ migratetype = pcpmigratetype = get_pcppage_migratetype(page);
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
return;
}
- migratetype = MIGRATE_MOVABLE;
+ pcpmigratetype = MIGRATE_MOVABLE;
}
zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
- free_unref_page_commit(zone, pcp, page, migratetype, order);
+ free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
+ unsigned long hsz = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
+
+ /* We need the huge page size for set_huge_pte_at() */
+ hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte, pteval,
+ hsz);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
+ unsigned long hsz = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
+
+ /* We need the huge page size for set_huge_pte_at() */
+ hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte, pteval,
+ hsz);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte,
+ pteval, hsz);
else
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
if (anon_exclusive &&
page_try_share_anon_rmap(subpage)) {
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte,
+ pteval, hsz);
else
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
if (pte_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
+ hsz);
else
set_pte_at(mm, address, pvmw.pte, swp_pte);
trace_set_migration_pte(address, pte_val(swp_pte),
#endif
.kill_sb = kill_litter_super,
#ifdef CONFIG_SHMEM
- .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
#else
.fs_flags = FS_USERNS_MOUNT,
#endif
void kmem_cache_destroy(struct kmem_cache *s)
{
- int refcnt;
+ int err = -EBUSY;
bool rcu_set;
if (unlikely(!s) || !kasan_check_byte(s))
rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
- refcnt = --s->refcount;
- if (refcnt)
+ s->refcount--;
+ if (s->refcount)
goto out_unlock;
- WARN(shutdown_cache(s),
- "%s %s: Slab cache still has objects when called from %pS",
+ err = shutdown_cache(s);
+ WARN(err, "%s %s: Slab cache still has objects when called from %pS",
__func__, s->name, (void *)_RET_IP_);
out_unlock:
mutex_unlock(&slab_mutex);
cpus_read_unlock();
- if (!refcnt && !rcu_set)
+ if (!err && !rcu_set)
kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
size_t kmalloc_size_roundup(size_t size)
{
- struct kmem_cache *c;
+ if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
+ /*
+ * The flags don't matter since size_index is common to all.
+ * Neither does the caller for just getting ->object_size.
+ */
+ return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+ }
- /* Short-circuit the 0 size case. */
- if (unlikely(size == 0))
- return 0;
- /* Short-circuit saturated "too-large" case. */
- if (unlikely(size == SIZE_MAX))
- return SIZE_MAX;
/* Above the smaller buckets, size is a multiple of page size. */
- if (size > KMALLOC_MAX_CACHE_SIZE)
+ if (size && size <= KMALLOC_MAX_SIZE)
return PAGE_SIZE << get_order(size);
/*
- * The flags don't matter since size_index is common to all.
- * Neither does the caller for just getting ->object_size.
+ * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
+ * and very large size - kmalloc() may fail.
*/
- c = kmalloc_slab(size, GFP_KERNEL, 0);
- return c ? c->object_size : 0;
+ return size;
+
}
EXPORT_SYMBOL(kmalloc_size_roundup);
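A hedged usage sketch of the reworked kmalloc_size_roundup(); the wrapper name is invented and the exact rounded sizes depend on the kernel configuration. The point of the change is that the result can always be handed straight back to kmalloc(), since a non-zero input no longer rounds down to 0.

#include <linux/slab.h>

/* Allocate at least @want bytes and report the usable capacity, relying on
 * kmalloc_size_roundup() returning a kmalloc()-compatible size. */
static void *alloc_rounded(size_t want, size_t *cap)
{
	*cap = kmalloc_size_roundup(want);	/* e.g. 100 -> 128 on common configs */
	return kmalloc(*cap, GFP_KERNEL);
}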
pte_t entry = pfn_pte(pfn, prot);
entry = arch_make_huge_pte(entry, ilog2(size), 0);
- set_huge_pte_at(&init_mm, addr, pte, entry);
+ set_huge_pte_at(&init_mm, addr, pte, entry, size);
pfn += PFN_DOWN(size);
continue;
}
return false;
/*
+ * If this is a duplicate, it must be removed before attempting to store
+ * it; otherwise, if the store fails, the old page won't be removed from
+ * the tree and it might be written back, overwriting the new data.
+ */
+ spin_lock(&tree->lock);
+ dupentry = zswap_rb_search(&tree->rbroot, offset);
+ if (dupentry) {
+ zswap_duplicate_entry++;
+ zswap_invalidate_entry(tree, dupentry);
+ }
+ spin_unlock(&tree->lock);
+
+ /*
* XXX: zswap reclaim does not work with cgroups yet. Without a
* cgroup-aware entry LRU, we will push out entries system-wide based on
* local cgroup limits.
/* map */
spin_lock(&tree->lock);
+ /*
+ * A duplicate entry should have been removed at the beginning of this
+ * function. Since the swap entry should be pinned, if a duplicate is
+ * found again here it means that something went wrong in the swap
+ * cache.
+ */
while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
+ WARN_ON(1);
zswap_duplicate_entry++;
zswap_invalidate_entry(tree, dupentry);
}
If you want to connect your Linux box to an amateur radio, answer Y
here. You want to read <https://www.tapr.org/>
and more specifically about AX.25 on Linux
- <http://www.linux-ax25.org/>.
+ <https://linux-ax25.in-berlin.de>.
Note that the answer to this question won't directly affect the
kernel: saying N will just cause the configurator to skip all
configuration. Linux cannot yet act as a DAMA server. This option
only compiles DAMA slave support into the kernel. It still needs to
be enabled at runtime. For more about DAMA see
- <http://www.linux-ax25.org>. If unsure, say Y.
+ <https://linux-ax25.in-berlin.de>. If unsure, say Y.
# placeholder until implemented
config AX25_DAMA_MASTER
A comprehensive listing of all the software for Linux amateur radio
users as well as information about how to configure an AX.25 port is
contained in the Linux Ham Wiki, available from
- <http://www.linux-ax25.org>. You also might want to check out the
- file <file:Documentation/networking/ax25.rst>. More information about
- digital amateur radio in general is on the WWW at
+ <https://linux-ax25.in-berlin.de>. You also might want to check out
+ the file <file:Documentation/networking/ax25.rst>. More information
+ about digital amateur radio in general is on the WWW at
<https://www.tapr.org/>.
To compile this driver as a module, choose M here: the
A comprehensive listing of all the software for Linux amateur radio
users as well as information about how to configure an AX.25 port is
contained in the Linux Ham Wiki, available from
- <http://www.linux-ax25.org>. You also might want to check out the
- file <file:Documentation/networking/ax25.rst>. More information about
- digital amateur radio in general is on the WWW at
+ <https://linux-ax25.in-berlin.de>. You also might want to check out
+ the file <file:Documentation/networking/ax25.rst>. More information
+ about digital amateur radio in general is on the WWW at
<https://www.tapr.org/>.
To compile this driver as a module, choose M here: the
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return -ENOMEM;
}
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb) {
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return;
}
if ((mdst && mdst->host_joined) ||
br_multicast_is_router(brmctx, skb)) {
local_rcv = true;
- br->dev->stats.multicast++;
+ DEV_STATS_INC(br->dev, multicast);
}
mcast_hit = true;
} else {
local_rcv = true;
- br->dev->stats.multicast++;
+ DEV_STATS_INC(br->dev, multicast);
}
break;
case BR_PKT_UNICAST:
*/
#include <linux/uaccess.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
return -EINVAL;
/* Use one page as a bit array of possible slots */
- inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
+ inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
if (!inuse)
return -ENOMEM;
}
i = find_first_zero_bit(inuse, max_netdevices);
- free_page((unsigned long) inuse);
+ bitmap_free(inuse);
}
snprintf(buf, IFNAMSIZ, name, i);
break;
}
- nhoff += ntohs(hdr->message_length);
+ nhoff += sizeof(struct ptp_header);
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
int err;
struct net *net = dev_net(skb->dev);
- /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
- * which is in byte 7 of the dccp header.
- * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
- *
- * Later on, we want to access the sequence number fields, which are
- * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
- */
+ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
+ return -EINVAL;
dh = (struct dccp_hdr *)(skb->data + offset);
if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
return -EINVAL;
__u64 seq;
struct net *net = dev_net(skb->dev);
- /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
- * which is in byte 7 of the dccp header.
- * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
- *
- * Later on, we want to access the sequence number fields, which are
- * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
- */
+ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
+ return -EINVAL;
dh = (struct dccp_hdr *)(skb->data + offset);
if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
return -EINVAL;
KUNIT_EXPECT_PTR_EQ(test, req, result);
handshake_req_cancel(sock->sk);
- sock_release(sock);
+ fput(filp);
}
static void handshake_req_submit_test5(struct kunit *test)
/* Assert */
KUNIT_EXPECT_EQ(test, err, -EAGAIN);
- sock_release(sock);
+ fput(filp);
hn->hn_pending = saved;
}
KUNIT_EXPECT_EQ(test, err, -EBUSY);
handshake_req_cancel(sock->sk);
- sock_release(sock);
+ fput(filp);
}
static void handshake_req_cancel_test1(struct kunit *test)
/* Assert */
KUNIT_EXPECT_TRUE(test, result);
- sock_release(sock);
+ fput(filp);
}
static void handshake_req_cancel_test2(struct kunit *test)
/* Assert */
KUNIT_EXPECT_TRUE(test, result);
- sock_release(sock);
+ fput(filp);
}
static void handshake_req_cancel_test3(struct kunit *test)
/* Assert */
KUNIT_EXPECT_FALSE(test, result);
- sock_release(sock);
+ fput(filp);
}
static struct handshake_req *handshake_req_destroy_test;
handshake_req_cancel(sock->sk);
/* Act */
- sock_release(sock);
+ fput(filp);
/* Assert */
KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
+ return -EINVAL;
}
frame->is_from_san = false;
/* And leave the HSR tag. */
if (ethhdr->h_proto == htons(ETH_P_HSR)) {
- pull_size = sizeof(struct ethhdr);
+ pull_size = sizeof(struct hsr_tag);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
}
/* And leave the HSR sup tag. */
- pull_size = sizeof(struct hsr_tag);
+ pull_size = sizeof(struct hsr_sup_tag);
skb_pull(skb, pull_size);
total_pull_size += pull_size;
struct hsr_sup_tlv {
u8 HSR_TLV_type;
u8 HSR_TLV_length;
-};
+} __packed;
/* HSR/PRP Supervision Frame data types.
* Field names as defined in the IEC:2010 standard for HSR.
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1;
- struct in_ifaddr *last_prim;
+ struct in_ifaddr __rcu **last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
ifa1 = rtnl_dereference(*ifap);
- last_prim = rtnl_dereference(in_dev->ifa_list);
+ last_prim = ifap;
if (in_dev->dead)
goto no_promotions;
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
- last_prim = ifa;
+ last_prim = &ifa->ifa_next;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
rcu_assign_pointer(prev_prom->ifa_next, next_sec);
- last_sec = rtnl_dereference(last_prim->ifa_next);
+ last_sec = rtnl_dereference(*last_prim);
rcu_assign_pointer(promote->ifa_next, last_sec);
- rcu_assign_pointer(last_prim->ifa_next, promote);
+ rcu_assign_pointer(*last_prim, promote);
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
const struct net *net, unsigned short port,
int l3mdev, const struct sock *sk)
{
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;
+
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family)
+ if (sk->sk_family != tb->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
+ tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
return false;
+ }
if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
- else
+ return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
+ return tb->rcv_saddr == sk->sk_rcv_saddr;
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;
+
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_any(&tb->v6_rcv_saddr);
+ return ipv6_addr_any(&tb->v6_rcv_saddr) ||
+ ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
return false;
}
if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_any(&tb->v6_rcv_saddr);
- else
+ return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
+ return tb->rcv_saddr == 0;
}
/* The socket's bhash2 hashbucket spinlock must be held when this is called */
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
+ struct net_device *dev;
struct ip_options opt;
int res;
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
+ res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)
out_error:
kcm_push(kcm);
- if (copied && sock->type == SOCK_SEQPACKET) {
+ if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error, return partial success.
*/
- goto partial_message;
- }
-
- if (head != kcm->seq_skb)
+ if (copied)
+ goto partial_message;
+ if (head != kcm->seq_skb)
+ kfree_skb(head);
+ } else {
kfree_skb(head);
+ kcm->seq_skb = NULL;
+ }
err = sk_stream_error(sk, msg->msg_flags, err);
if (rcv_wnd == rcv_wnd_old)
break;
- if (before64(rcv_wnd_new, rcv_wnd)) {
+
+ rcv_wnd_old = rcv_wnd;
+ if (before64(rcv_wnd_new, rcv_wnd_old)) {
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICTUPDATE);
goto raise_win;
}
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
- rcv_wnd_old = rcv_wnd;
}
return;
}
return false;
}
-static void mptcp_stop_timer(struct sock *sk)
+static void mptcp_stop_rtx_timer(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
return moved;
}
+static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
+{
+ int err = sock_error(ssk);
+ int ssk_state;
+
+ if (!err)
+ return false;
+
+ /* only propagate errors on fallen-back sockets or
+ * on MPC connect
+ */
+ if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
+ return false;
+
+ /* We need to propagate only transition to CLOSE state.
+ * Orphaned socket will see such state change via
+ * subflow_sched_work_if_closed() and that path will properly
+ * destroy the msk as needed.
+ */
+ ssk_state = inet_sk_state_load(ssk);
+ if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+ inet_sk_state_store(sk, ssk_state);
+ WRITE_ONCE(sk->sk_err, -err);
+
+ /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+ smp_wmb();
+ sk_error_report(sk);
+ return true;
+}
+
+void __mptcp_error_report(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ mptcp_for_each_subflow(msk, subflow)
+ if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
+ break;
+}
+
/* In most cases we will be able to lock the mptcp socket. If it's already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
mptcp_sockopt_sync_locked(msk, ssk);
mptcp_subflow_joined(msk, ssk);
+ mptcp_stop_tout_timer(sk);
return true;
}
}
}
-static bool mptcp_timer_pending(struct sock *sk)
+static bool mptcp_rtx_timer_pending(struct sock *sk)
{
return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}
-static void mptcp_reset_timer(struct sock *sk)
+static void mptcp_reset_rtx_timer(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned long tout;
out:
if (snd_una == READ_ONCE(msk->snd_nxt) &&
snd_una == READ_ONCE(msk->write_seq)) {
- if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
- mptcp_stop_timer(sk);
+ if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
+ mptcp_stop_rtx_timer(sk);
} else {
- mptcp_reset_timer(sk);
+ mptcp_reset_rtx_timer(sk);
}
}
mptcp_push_release(ssk, &info);
/* ensure the rtx timer is running */
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
if (do_check_data_fin)
mptcp_check_send_data_fin(sk);
}
if (copied) {
tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
info.size_goal);
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
if (msk->snd_data_fin_enable &&
msk->snd_nxt + 1 == msk->write_seq)
sock_put(sk);
}
-static void mptcp_timeout_timer(struct timer_list *t)
+static void mptcp_tout_timer(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
bool dispose_it, need_push = false;
/* If the first subflow moved to a close state before accept, e.g. due
- * to an incoming reset, mptcp either:
- * - if either the subflow or the msk are dead, destroy the context
- * (the subflow socket is deleted by inet_child_forget) and the msk
- * - otherwise do nothing at the moment and take action at accept and/or
- * listener shutdown - user-space must be able to accept() the closed
- * socket.
+ * to an incoming reset or listener shutdown, the subflow socket is
+ * already deleted by inet_child_forget() and the mptcp socket can't
+ * survive either.
*/
- if (msk->in_accept_queue && msk->first == ssk) {
- if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
- return;
-
+ if (msk->in_accept_queue && msk->first == ssk &&
+ (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
/* ensure later check in mptcp_worker() will dispose the msk */
+ mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1));
sock_set_flag(sk, SOCK_DEAD);
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
mptcp_subflow_drop_ctx(ssk);
}
out_release:
+ __mptcp_subflow_error_report(sk, ssk);
release_sock(ssk);
sock_put(ssk);
out:
if (need_push)
__mptcp_push_pending(sk, 0);
+
+ /* Catch every 'all subflows closed' scenario, including peers silently
+ * closing them, e.g. due to timeout.
+ * For established sockets, allow an additional timeout before closing,
+ * as the protocol can still create more subflows.
+ */
+ if (list_is_singular(&msk->conn_list) && msk->first &&
+ inet_sk_state_load(msk->first) == TCP_CLOSE) {
+ if (sk->sk_state != TCP_ESTABLISHED ||
+ msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ mptcp_close_wake_up(sk);
+ } else {
+ mptcp_start_tout_timer(sk);
+ }
+ }
}
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
}
-static bool mptcp_should_close(const struct sock *sk)
+static bool mptcp_close_tout_expired(const struct sock *sk)
{
- s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
- struct mptcp_subflow_context *subflow;
-
- if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
- return true;
+ if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
+ sk->sk_state == TCP_CLOSE)
+ return false;
- /* if all subflows are in closed status don't bother with additional
- * timeout
- */
- mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
- if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
- TCP_CLOSE)
- return false;
- }
- return true;
+ return time_after32(tcp_jiffies32,
+ inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN);
}
static void mptcp_check_fastclose(struct mptcp_sock *msk)
reset_timer:
mptcp_check_and_set_pending(sk);
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
}
/* schedule the timeout timer for the relevant event: either close timeout
* or mp_fail timeout. The close timeout takes precedence on the mp_fail one
*/
-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout)
+void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
{
struct sock *sk = (struct sock *)msk;
unsigned long timeout, close_timeout;
- if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
+ if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
return;
- close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
+ close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
+ TCP_TIMEWAIT_LEN;
/* the close timeout takes precedence on the fail one, and here at least one of
* them is active
*/
- timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
+ timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
sk_reset_timer(sk, &sk->sk_timer, timeout);
}
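The close_timeout expression above mixes two time bases; a brief annotated restatement (an editorial sketch, assuming tcp_jiffies32 and jiffies advance at the same rate) may help:

/*
 *   elapsed       = tcp_jiffies32 - probe_timestamp;        // time since close
 *   close_timeout = jiffies + (TCP_TIMEWAIT_LEN - elapsed);
 *
 * which equals probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN,
 * so the timer always expires TCP_TIMEWAIT_LEN after the recorded close
 * timestamp, no matter when mptcp_reset_tout_timer() is (re)armed.
 */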
mptcp_subflow_reset(ssk);
WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
unlock_sock_fast(ssk, slow);
-
- mptcp_reset_timeout(msk, 0);
}
static void mptcp_do_fastclose(struct sock *sk)
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(sk);
- /* There is no point in keeping around an orphaned sk timedout or
- * closed, but we need the msk around to reply to incoming DATA_FIN,
- * even if it is orphaned and in FIN_WAIT2 state
- */
- if (sock_flag(sk, SOCK_DEAD)) {
- if (mptcp_should_close(sk))
- mptcp_do_fastclose(sk);
+ if (mptcp_close_tout_expired(sk)) {
+ mptcp_do_fastclose(sk);
+ mptcp_close_wake_up(sk);
+ }
- if (sk->sk_state == TCP_CLOSE) {
- __mptcp_destroy_sock(sk);
- goto unlock;
- }
+ if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
+ __mptcp_destroy_sock(sk);
+ goto unlock;
}
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
/* re-use the csk retrans timer for MPTCP-level retrans */
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
- timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
+ timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
}
static void mptcp_ca_reset(struct sock *sk)
} else {
pr_debug("Sending DATA_FIN on subflow %p", ssk);
tcp_send_ack(ssk);
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
}
break;
}
might_sleep();
- mptcp_stop_timer(sk);
+ mptcp_stop_rtx_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
msk->pm.status = 0;
mptcp_release_sched(msk);
cleanup:
/* orphan all the subflows */
- inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow = lock_sock_fast_nested(ssk);
__mptcp_destroy_sock(sk);
do_cancel_work = true;
} else {
- mptcp_reset_timeout(msk, 0);
+ mptcp_start_tout_timer(sk);
}
return do_cancel_work;
mptcp_check_listen_stop(sk);
inet_sk_state_store(sk, TCP_CLOSE);
- mptcp_stop_timer(sk);
- sk_stop_timer(sk, &sk->sk_timer);
+ mptcp_stop_rtx_timer(sk);
+ mptcp_stop_tout_timer(sk);
if (msk->token)
mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
void mptcp_finish_connect(struct sock *sk);
void __mptcp_set_connected(struct sock *sk);
-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout);
+void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
+
+static inline void mptcp_stop_tout_timer(struct sock *sk)
+{
+ if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
+ return;
+
+ sk_stop_timer(sk, &sk->sk_timer);
+ inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
+}
+
+static inline void mptcp_set_close_tout(struct sock *sk, unsigned long tout)
+{
+ /* avoid 0 timestamp, as that means no close timeout */
+ inet_csk(sk)->icsk_mtup.probe_timestamp = tout ? : 1;
+}
+
+static inline void mptcp_start_tout_timer(struct sock *sk)
+{
+ mptcp_set_close_tout(sk, tcp_jiffies32);
+ mptcp_reset_tout_timer(mptcp_sk(sk), 0);
+}
+
static inline bool mptcp_is_fully_established(struct sock *sk)
{
return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
WRITE_ONCE(subflow->fail_tout, fail_tout);
tcp_send_ack(ssk);
- mptcp_reset_timeout(msk, subflow->fail_tout);
+ mptcp_reset_tout_timer(msk, subflow->fail_tout);
}
static bool subflow_check_data_avail(struct sock *ssk)
*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
-void __mptcp_error_report(struct sock *sk)
-{
- struct mptcp_subflow_context *subflow;
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- int err = sock_error(ssk);
- int ssk_state;
-
- if (!err)
- continue;
-
- /* only propagate errors on fallen-back sockets or
- * on MPC connect
- */
- if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
- continue;
-
- /* We need to propagate only transition to CLOSE state.
- * Orphaned socket will see such state change via
- * subflow_sched_work_if_closed() and that path will properly
- * destroy the msk as needed.
- */
- ssk_state = inet_sk_state_load(ssk);
- if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
- inet_sk_state_store(sk, ssk_state);
- WRITE_ONCE(sk->sk_err, -err);
-
- /* This barrier is coupled with smp_rmb() in mptcp_poll() */
- smp_wmb();
- sk_error_report(sk);
- break;
- }
-}
-
static void subflow_error_report(struct sock *ssk)
{
struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
mptcp_sock_graft(ssk, sk->sk_socket);
iput(SOCK_INODE(sf));
WRITE_ONCE(msk->allow_infinite_fallback, false);
+ mptcp_stop_tout_timer(sk);
return 0;
failed_unlink:
if ((had_link == has_link) || chained)
return 0;
+ if (had_link)
+ netif_carrier_off(ndp->ndev.dev);
+ else
+ netif_carrier_on(ndp->ndev.dev);
+
if (!ndp->multi_package && !nc->package->multi_channel) {
if (had_link) {
ndp->flags |= NCSI_DEV_RESHUFFLE;
* a separate reference counter
*/
static void
+__ip_set_get_netlink(struct ip_set *set)
+{
+ write_lock_bh(&ip_set_ref_lock);
+ set->ref_netlink++;
+ write_unlock_bh(&ip_set_ref_lock);
+}
+
+static void
__ip_set_put_netlink(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
do {
if (retried) {
- __ip_set_get(set);
+ __ip_set_get_netlink(set);
nfnl_unlock(NFNL_SUBSYS_IPSET);
cond_resched();
nfnl_lock(NFNL_SUBSYS_IPSET);
- __ip_set_put(set);
+ __ip_set_put_netlink(set);
}
ip_set_lock(set);
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
+ if (!nf_ct_is_confirmed(nfct))
+ nfct->timeout += nfct_time_stamp;
nfct->status |= IPS_CONFIRMED;
err = nf_conntrack_hash_check_insert(nfct);
if (err < 0) {
[NF_CT_EXT_ECACHE] = sizeof(struct nf_conntrack_ecache),
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
- [NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_acct),
+ [NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_tstamp),
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- [NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_tstamp),
+ [NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_timeout),
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
[NF_CT_EXT_LABELS] = sizeof(struct nf_conn_labels),
flags & NFT_TABLE_F_OWNER))
return -EOPNOTSUPP;
+ /* No dormant off/on/off/on games in a single transaction */
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return -EINVAL;
+
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
if (!nft_is_active_next(ctx->net, chain))
continue;
- if (nft_chain_is_bound(chain))
+ if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
if (!nft_is_active_next(ctx->net, set))
continue;
- if (nft_set_is_anonymous(set) &&
- !list_empty(&set->bindings))
+ if (nft_set_is_anonymous(set))
continue;
err = nft_delset(ctx, set);
if (!nft_is_active_next(ctx->net, chain))
continue;
- if (nft_chain_is_bound(chain))
+ if (nft_chain_binding(chain))
continue;
ctx->chain = chain;
return PTR_ERR(chain);
}
+ if (nft_chain_binding(chain))
+ return -EOPNOTSUPP;
+
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (nla[NFTA_CHAIN_HOOK]) {
struct net *net = sock_net(skb->sk);
const struct nft_rule *rule, *prule;
unsigned int s_idx = cb->args[0];
+ unsigned int entries = 0;
+ int ret = 0;
u64 handle;
prule = NULL;
NFT_MSG_NEWRULE,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
- table, chain, rule, handle, reset) < 0)
- return 1;
-
+ table, chain, rule, handle, reset) < 0) {
+ ret = 1;
+ break;
+ }
+ entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
prule = rule;
(*idx)++;
}
- if (reset && *idx)
- audit_log_rule_reset(table, cb->seq, *idx);
+ if (reset && entries)
+ audit_log_rule_reset(table, cb->seq, entries);
- return 0;
+ return ret;
}
static int nf_tables_dump_rules(struct sk_buff *skb,
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
+ if (nft_chain_binding(chain)) {
+ err = -EOPNOTSUPP;
+ goto err_destroy_flow_rule;
+ }
+
err = nft_delrule(&ctx, old_rule);
if (err < 0)
goto err_destroy_flow_rule;
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
- if (nft_chain_is_bound(chain))
+ if (nft_chain_binding(chain))
return -EOPNOTSUPP;
}
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
- if (nft_chain_is_bound(chain))
+ if (nft_chain_binding(chain))
continue;
ctx.chain = chain;
if (IS_ERR(set))
return PTR_ERR(set);
- if (!list_empty(&set->bindings) &&
- (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
+ if (nft_set_is_anonymous(set))
+ return -EOPNOTSUPP;
+
+ if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
return -EBUSY;
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
unsigned int gc_seq, gfp_t gfp)
{
+ struct nft_set *set;
+
if (nft_trans_gc_space(gc))
return gc;
+ set = gc->set;
nft_trans_gc_queue_work(gc);
- return nft_trans_gc_alloc(gc->set, gc_seq, gfp);
+ return nft_trans_gc_alloc(set, gc_seq, gfp);
}
void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
{
+ struct nft_set *set;
+
if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
return NULL;
if (nft_trans_gc_space(gc))
return gc;
+ set = gc->set;
call_rcu(&gc->rcu, nft_trans_gc_trans_free);
- return nft_trans_gc_alloc(gc->set, 0, gfp);
+ return nft_trans_gc_alloc(set, 0, gfp);
}
void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
call_rcu(&trans->rcu, nft_trans_gc_trans_free);
}
-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
- unsigned int gc_seq)
+static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+ unsigned int gc_seq,
+ bool sync)
{
struct nft_set_elem_catchall *catchall;
const struct nft_set *set = gc->set;
nft_set_elem_dead(ext);
dead_elem:
- gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (sync)
+ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ else
+ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+
if (!gc)
return NULL;
return gc;
}
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq)
+{
+ return nft_trans_gc_catchall(gc, gc_seq, false);
+}
+
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+{
+ return nft_trans_gc_catchall(gc, 0, true);
+}
+
static void nf_tables_module_autoload_cleanup(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
ctx.family = table->family;
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
- if (nft_chain_is_bound(chain))
+ if (nft_chain_binding(chain))
continue;
ctx.chain = chain;
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
- if (PTR_ERR(he) != -EAGAIN) {
- nft_trans_gc_destroy(gc);
- gc = NULL;
- goto try_later;
- }
- continue;
+ nft_trans_gc_destroy(gc);
+ gc = NULL;
+ goto try_later;
}
/* Ruleset has been updated, try later. */
nft_trans_gc_elem_add(gc, he);
}
- gc = nft_trans_gc_catchall(gc, gc_seq);
+ gc = nft_trans_gc_catchall_async(gc, gc_seq);
try_later:
/* catchall list iteration requires rcu read side lock. */
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (!gc)
- break;
+ return;
nft_pipapo_gc_deactivate(net, set, e);
pipapo_drop(m, rulemap);
}
}
- gc = nft_trans_gc_catchall(gc, 0);
+ gc = nft_trans_gc_catchall_sync(gc);
if (gc) {
nft_trans_gc_queue_sync_done(gc);
priv->last_gc = jiffies;
if (!gc)
goto done;
- write_lock_bh(&priv->lock);
- write_seqcount_begin(&priv->count);
+ read_lock_bh(&priv->lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
/* Ruleset has been updated, try later. */
nft_trans_gc_elem_add(gc, rbe);
}
- gc = nft_trans_gc_catchall(gc, gc_seq);
+ gc = nft_trans_gc_catchall_async(gc, gc_seq);
try_later:
- write_seqcount_end(&priv->count);
- write_unlock_bh(&priv->lock);
+ read_unlock_bh(&priv->lock);
if (gc)
nft_trans_gc_queue_async_done(gc);
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
- rdma_set_service_type(cm_id, conn->c_tos);
- rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
- /* XXX do we need to clean up if this fails? */
- ret = rdma_resolve_route(cm_id,
- RDS_RDMA_RESOLVE_TIMEOUT_MS);
+ if (conn) {
+ rdma_set_service_type(cm_id, conn->c_tos);
+ rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
+ /* XXX do we need to clean up if this fails? */
+ ret = rdma_resolve_route(cm_id,
+ RDS_RDMA_RESOLVE_TIMEOUT_MS);
+ }
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
{
struct smc_link_group *lgr, *n;
+ spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
struct smc_link *link;
if (link)
smc_llc_add_link_local(link);
}
+ spin_unlock_bh(&smc_lgr_list.lock);
}
/* link is down - switch connections to alternate link,
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
typeof(_ini) i = (_ini); \
- bool is_v2 = (i->smcd_version & SMC_V2); \
bool is_smcd = (i->is_smcd); \
+ u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
+ bool is_v2 = (version & SMC_V2); \
typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
if (is_v2 && is_smcd) \
this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
out_verifier:
trace_rpc_bad_verifier(task);
- goto out_err;
+ goto out_garbage;
out_msg_denied:
error = -EACCES;
case rpc_autherr_rejectedverf:
case rpcsec_gsserr_credproblem:
case rpcsec_gsserr_ctxproblem:
+ rpcauth_invalcred(task);
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
* @clnt: pointer to struct rpc_clnt
* @xps: pointer to struct rpc_xprt_switch,
 * @xprt: pointer to struct rpc_xprt
- * @dummy: unused
+ * @in_max_connect: pointer to the max_connect value for the passed-in xprt transport
*/
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
- void *dummy)
+ void *in_max_connect)
{
struct rpc_cb_add_xprt_calldata *data;
struct rpc_task *task;
+ int max_connect = clnt->cl_max_connect;
- if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
+ if (in_max_connect)
+ max_connect = *(int *)in_max_connect;
+ if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
rcu_read_lock();
pr_warn("SUNRPC: reached max allowed number (%d) did not add "
- "transport to server: %s\n", clnt->cl_max_connect,
+ "transport to server: %s\n", max_connect,
rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
rcu_read_unlock();
return -EINVAL;
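With the new parameter, a caller that wants a per-call limit passes a pointer to its own value instead of NULL. A minimal hypothetical sketch follows; the caller name and the limit of 4 are assumptions, only rpc_clnt_add_xprt() and rpc_clnt_test_and_add_xprt() are existing interfaces:

#include <linux/sunrpc/clnt.h>

/* Hypothetical caller: allow up to 4 transports for this client,
 * overriding clnt->cl_max_connect for this add operation only.
 */
static int example_add_trunked_xprt(struct rpc_clnt *clnt,
				    struct xprt_create *xprtargs)
{
	int max_connect = 4;	/* assumed caller-chosen limit */

	return rpc_clnt_add_xprt(clnt, xprtargs,
				 rpc_clnt_test_and_add_xprt,
				 &max_connect);
}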
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
endif
# Create necessary directories
-$(shell mkdir -p $(sort $(dir $(install-y))))
+$(foreach dir, $(sort $(dir $(install-y))), $(shell mkdir -p $(dir)))
$(dst)/%.ko: $(extmod_prefix)%.ko FORCE
$(call cmd,install)
quiet_cmd_gzip = GZIP $@
cmd_gzip = $(KGZIP) -n -f $<
quiet_cmd_xz = XZ $@
- cmd_xz = $(XZ) --lzma2=dict=2MiB -f $<
+ cmd_xz = $(XZ) --check=crc32 --lzma2=dict=1MiB -f $<
quiet_cmd_zstd = ZSTD $@
cmd_zstd = $(ZSTD) -T0 --rm -f -q $<
fi
# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
- if [ ! -z "${order}" ]; then
+ if [ ! -z "${order}" ] && ! meta_is_implicitly_relaxed "${meta}"; then
printf "#elif defined(arch_${basename})\n"
printf "\t${retstmt}arch_${basename}(${args});\n"
fi
return "{textaddr} {sections}".format(
textaddr=textaddr, sections="".join(args))
- def load_module_symbols(self, module, module_file=None):
+ def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['mem'][constants.LX_MOD_TEXT]['base']).split()[0]
- if not module_file:
- module_file = self._get_module_file(module_name)
+ module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
- def load_ko_symbols(self, mod_path):
- self.loaded_modules = []
- module_list = modules.module_list()
-
- for module in module_list:
- module_name = module['name'].string()
- module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
- module_name.replace("_", r"[_\-]"))
- if re.match(module_pattern, mod_path) and os.path.exists(mod_path):
- self.load_module_symbols(module, mod_path)
- return
- raise gdb.GdbError("%s is not a valid .ko\n" % mod_path)
-
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
self.module_files = []
self.module_files_updated = False
- argv = gdb.string_to_argv(arg)
- if len(argv) == 1:
- self.load_ko_symbols(argv[0])
- return
-
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
/* First handle the "special" cases */
if (sym_is(name, namelen, "usb"))
do_usb_table(symval, sym->st_size, mod);
- if (sym_is(name, namelen, "of"))
+ else if (sym_is(name, namelen, "of"))
do_of_table(symval, sym->st_size, mod);
else if (sym_is(name, namelen, "pnp"))
do_pnp_device_entry(symval, sym->st_size, mod);
"*_console")))
return 0;
- /* symbols in data sections that may refer to meminit/exit sections */
+ /* symbols in data sections that may refer to meminit sections */
if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
- match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_XXXEXIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*driver")))
+ return 0;
+
+ /*
+ * symbols in data sections must not refer to .exit.*, but there are
+	 * quite a few offenders, so hide these except in W=1 builds until
+ * these are fixed.
+ */
+ if (!extra_warn &&
+ match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+ match(tosec, PATTERNS(EXIT_SECTIONS)) &&
match(fromsym, PATTERNS("*driver")))
return 0;
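For reference, the class of offender the comment describes looks roughly like the following hypothetical module fragment (the names are invented; the point is a *driver object in a data section holding a pointer into .exit.text):

#include <linux/init.h>

static void __exit mydrv_teardown(void)	/* placed in .exit.text */
{
}

struct mydrv_ops {
	void (*teardown)(void);
};

/* mydrv_driver lands in a data section and references .exit.text;
 * with the change above this is only reported for W=1 builds because
 * the symbol name matches the "*driver" pattern.
 */
static struct mydrv_ops mydrv_driver = {
	.teardown = mydrv_teardown,
};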
*/
s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
+ /*
+	 * For parisc64, symbols prefixed with $$ from the library have the symbol type
+ * STT_LOPROC. They should be handled as functions too.
+ */
+ if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 &&
+ elf->hdr->e_machine == EM_PARISC &&
+ ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
+ s->is_func = true;
+
if (match(secname, PATTERNS(INIT_SECTIONS)))
warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
mod->name, name);
${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
# Install the kernel
if [ "${ARCH}" = um ] ; then
find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
find include scripts -type f -o -type l
find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
- find "$(find "arch/${SRCARCH}" -name include -o -name scripts -type d)" -type f
+ find "arch/${SRCARCH}" -name include -o -name scripts -type d
) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"
{
cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE}
cp .config %{buildroot}/boot/config-%{KERNELRELEASE}
ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build
-ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/source
%if %{with_devel}
%{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}'
%endif
%defattr (-, root, root)
/lib/modules/%{KERNELRELEASE}
%exclude /lib/modules/%{KERNELRELEASE}/build
-%exclude /lib/modules/%{KERNELRELEASE}/source
/boot/*
%files headers
%defattr (-, root, root)
/usr/src/kernels/%{KERNELRELEASE}
/lib/modules/%{KERNELRELEASE}/build
-/lib/modules/%{KERNELRELEASE}/source
%endif
static int selinux_fs_context_submount(struct fs_context *fc,
struct super_block *reference)
{
- const struct superblock_security_struct *sbsec;
+ const struct superblock_security_struct *sbsec = selinux_superblock(reference);
struct selinux_mnt_opts *opts;
+ /*
+ * Ensure that fc->security remains NULL when no options are set
+ * as expected by selinux_set_mnt_opts().
+ */
+ if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
+ return 0;
+
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return -ENOMEM;
- sbsec = selinux_superblock(reference);
if (sbsec->flags & FSCONTEXT_MNT)
opts->fscontext_sid = sbsec->sid;
if (sbsec->flags & CONTEXT_MNT)
size_t extra_size)
{
int err;
-#ifdef CONFIG_SND_DEBUG
- char name[8];
-#endif
if (extra_size > 0)
card->private_data = (char *)card + sizeof(struct snd_card);
}
#ifdef CONFIG_SND_DEBUG
- sprintf(name, "card%d", idx);
- card->debugfs_root = debugfs_create_dir(name, sound_debugfs_root);
+ card->debugfs_root = debugfs_create_dir(dev_name(&card->card_dev),
+ sound_debugfs_root);
#endif
return 0;
if (IS_ENABLED(CONFIG_SND_UMP))
snd_iprintf(buffer, "Type: %s\n",
rawmidi_is_ump(rmidi) ? "UMP" : "Legacy");
- if (rmidi->ops->proc_read)
+ if (rmidi->ops && rmidi->ops->proc_read)
rmidi->ops->proc_read(entry, buffer);
mutex_lock(&rmidi->open_mutex);
if (rmidi->info_flags & SNDRV_RAWMIDI_INFO_OUTPUT) {
if (! port->name[0]) {
if (info->name[0]) {
if (ports > 1)
- snprintf(port->name, sizeof(port->name), "%s-%u", info->name, p);
+ scnprintf(port->name, sizeof(port->name), "%s-%u", info->name, p);
else
- snprintf(port->name, sizeof(port->name), "%s", info->name);
+ scnprintf(port->name, sizeof(port->name), "%s", info->name);
} else {
/* last resort */
if (ports > 1)
SNDRV_SEQ_PORT_TYPE_PORT;
port->midi_channels = 16;
if (*group->name)
- snprintf(port->name, sizeof(port->name), "Group %d (%s)",
+ snprintf(port->name, sizeof(port->name), "Group %d (%.53s)",
group->group + 1, group->name);
else
sprintf(port->name, "Group %d", group->group + 1);
snd_seq_kernel_client_put(cptr);
}
+/* set up client's group_filter bitmap */
+static void setup_client_group_filter(struct seq_ump_client *client)
+{
+ struct snd_seq_client *cptr;
+ unsigned int filter;
+ int p;
+
+ cptr = snd_seq_kernel_client_get(client->seq_client);
+ if (!cptr)
+ return;
+ filter = ~(1U << 0); /* always allow groupless messages */
+ for (p = 0; p < SNDRV_UMP_MAX_GROUPS; p++) {
+ if (client->groups[p].active)
+ filter &= ~(1U << (p + 1));
+ }
+ cptr->group_filter = filter;
+ snd_seq_kernel_client_put(cptr);
+}
+
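To see what the bitmap ends up looking like, consider a hypothetical endpoint where only groups 0 and 2 are active (a worked example, not taken from the patch); the filter is then consumed by the group_filter check added to the delivery path in a later hunk:

/*
 *   filter  = ~(1U << 0);        -> 0xfffffffe   (bit 0 clear: groupless OK)
 *   filter &= ~(1U << (0 + 1));  -> 0xfffffffc   (group 0 allowed)
 *   filter &= ~(1U << (2 + 1));  -> 0xfffffff4   (group 2 allowed)
 *
 * A destination port on the inactive group 1 has bit 2 set in the filter,
 * so its events are skipped by the "group filtered" test.
 */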
/* UMP group change notification */
static void handle_group_notify(struct work_struct *work)
{
update_group_attrs(client);
update_port_infos(client);
+ setup_client_group_filter(client);
}
/* UMP FB change notification */
goto error;
}
+ setup_client_group_filter(client);
+
err = create_ump_endpoint_port(client);
if (err < 0)
goto error;
struct snd_seq_event *event,
int atomic, int hop)
{
+ if (dest->group_filter & (1U << dest_port->ump_group))
+ return 0; /* group filtered - skip the event */
if (event->type == SNDRV_SEQ_EVENT_SYSEX)
return cvt_sysex_to_ump(dest, dest_port, event, atomic, hop);
else if (snd_seq_client_is_midi2(dest))
struct snd_rawmidi_substream *subs;
list_for_each_entry(subs, &str->substreams, list) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d",
- bebob->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ bebob->card->shortname, subs->number + 1);
}
}
struct snd_rawmidi_substream *subs;
list_for_each_entry(subs, &str->substreams, list) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d", dice->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d", dice->card->shortname, subs->number + 1);
}
}
list_for_each_entry(subs, &str->substreams, list) {
if (!is_console) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d",
- dg00x->card->shortname,
- subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ dg00x->card->shortname,
+ subs->number + 1);
} else {
- snprintf(subs->name, sizeof(subs->name),
- "%s control",
- dg00x->card->shortname);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s control",
+ dg00x->card->shortname);
}
}
}
struct snd_rawmidi_substream *substream;
list_for_each_entry(substream, &stream->substreams, list) {
- snprintf(substream->name, sizeof(substream->name),
- "%s MIDI %d", name, substream->number + 1);
+ scnprintf(substream->name, sizeof(substream->name),
+ "%s MIDI %d", name, substream->number + 1);
}
}
strcpy(efw->card->driver, "Fireworks");
strcpy(efw->card->shortname, hwinfo->model_name);
strcpy(efw->card->mixername, hwinfo->model_name);
- snprintf(efw->card->longname, sizeof(efw->card->longname),
- "%s %s v%s, GUID %08x%08x at %s, S%d",
- hwinfo->vendor_name, hwinfo->model_name, version,
- hwinfo->guid_hi, hwinfo->guid_lo,
- dev_name(&efw->unit->device), 100 << fw_dev->max_speed);
+ scnprintf(efw->card->longname, sizeof(efw->card->longname),
+ "%s %s v%s, GUID %08x%08x at %s, S%d",
+ hwinfo->vendor_name, hwinfo->model_name, version,
+ hwinfo->guid_hi, hwinfo->guid_lo,
+ dev_name(&efw->unit->device), 100 << fw_dev->max_speed);
if (hwinfo->flags & BIT(FLAG_RESP_ADDR_CHANGABLE))
efw->resp_addr_changable = true;
struct snd_rawmidi_substream *subs;
list_for_each_entry(subs, &str->substreams, list) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d", efw->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d", efw->card->shortname, subs->number + 1);
}
}
struct snd_rawmidi_substream *subs;
list_for_each_entry(subs, &str->substreams, list) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d", motu->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d", motu->card->shortname, subs->number + 1);
}
}
struct snd_rawmidi_substream *subs;
list_for_each_entry(subs, &str->substreams, list) {
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d",
- oxfw->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ oxfw->card->shortname, subs->number + 1);
}
}
strcpy(oxfw->card->mixername, m);
strcpy(oxfw->card->shortname, m);
- snprintf(oxfw->card->longname, sizeof(oxfw->card->longname),
- "%s %s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
- v, m, firmware >> 20, firmware & 0xffff,
- fw_dev->config_rom[3], fw_dev->config_rom[4],
- dev_name(&oxfw->unit->device), 100 << fw_dev->max_speed);
+ scnprintf(oxfw->card->longname, sizeof(oxfw->card->longname),
+ "%s %s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
+ v, m, firmware >> 20, firmware & 0xffff,
+ fw_dev->config_rom[3], fw_dev->config_rom[4],
+ dev_name(&oxfw->unit->device), 100 << fw_dev->max_speed);
end:
return err;
}
/* TODO: support virtual MIDI ports. */
if (subs->number < tscm->spec->midi_capture_ports) {
/* Hardware MIDI ports. */
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d",
- tscm->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ tscm->card->shortname, subs->number + 1);
}
}
list_for_each_entry(subs, &stream->substreams, list) {
if (subs->number < tscm->spec->midi_playback_ports) {
/* Hardware MIDI ports only. */
- snprintf(subs->name, sizeof(subs->name),
- "%s MIDI %d",
- tscm->card->shortname, subs->number + 1);
+ scnprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ tscm->card->shortname, subs->number + 1);
}
}
module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
{
struct fwnode_handle *link;
char name[32];
/* Find master handle */
snprintf(name, sizeof(name),
- "mipi-sdw-link-%d-subproperties", i);
+ "mipi-sdw-link-%hhu-subproperties", idx);
link = fwnode_get_named_child_node(fw_node, name);
if (!link)
sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
- int ret, i;
- u8 count;
+ u8 count, i;
+ int ret;
if (!adev)
return -EINVAL;
strscpy(card->shortname, chip->pcm->name, sizeof(card->shortname));
if (!thinkpad[n])
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %d, dma %d",
- chip->pcm->name, chip->port, irq[n], dma1[n]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %d, dma %d",
+ chip->pcm->name, chip->port, irq[n], dma1[n]);
else
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %d, dma %d [Thinkpad]",
- chip->pcm->name, chip->port, irq[n], dma1[n]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %d, dma %d [Thinkpad]",
+ chip->pcm->name, chip->port, irq[n], dma1[n]);
error = snd_card_register(card);
if (error < 0)
strscpy(card->shortname, chip->pcm->name, sizeof(card->shortname));
if (dma2[n] < 0)
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %d, dma %d",
- chip->pcm->name, chip->port, irq[n], dma1[n]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %d, dma %d",
+ chip->pcm->name, chip->port, irq[n], dma1[n]);
else
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %d, dma %d&%d",
- chip->pcm->name, chip->port, irq[n], dma1[n], dma2[n]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %d, dma %d&%d",
+ chip->pcm->name, chip->port, irq[n], dma1[n], dma2[n]);
error = snd_wss_mixer(chip);
if (error < 0)
strscpy(card->driver, chip->pcm->name, sizeof(card->driver));
strscpy(card->shortname, chip->pcm->name, sizeof(card->shortname));
if (dma2[dev] < 0)
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i, dma %i",
- chip->pcm->name, chip->port, irq[dev], dma1[dev]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %i, dma %i",
+ chip->pcm->name, chip->port, irq[dev], dma1[dev]);
else
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i, dma %i&%d",
- chip->pcm->name, chip->port, irq[dev], dma1[dev],
- dma2[dev]);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %i, dma %i&%d",
+ chip->pcm->name, chip->port, irq[dev], dma1[dev],
+ dma2[dev]);
err = snd_wss_timer(chip, 0);
if (err < 0)
strscpy(card->driver, "ES1688", sizeof(card->driver));
strscpy(card->shortname, chip->pcm->name, sizeof(card->shortname));
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i, dma %i", chip->pcm->name, chip->port,
- chip->irq, chip->dma8);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %i, dma %i", chip->pcm->name, chip->port,
+ chip->irq, chip->dma8);
if (fm_port[n] == SNDRV_AUTO_PORT)
fm_port[n] = port[n]; /* share the same port */
}
strcpy(card->driver, "miro");
- snprintf(card->longname, sizeof(card->longname),
- "%s: OPTi%s, %s at 0x%lx, irq %d, dma %d&%d",
- card->shortname, miro->name, codec->pcm->name,
- miro->wss_base + 4, miro->irq, miro->dma1, miro->dma2);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s: OPTi%s, %s at 0x%lx, irq %d, dma %d&%d",
+ card->shortname, miro->name, codec->pcm->name,
+ miro->wss_base + 4, miro->irq, miro->dma1, miro->dma2);
if (mpu_port <= 0 || mpu_port == SNDRV_AUTO_PORT)
rmidi = NULL;
strcpy(card->driver, chip->name);
sprintf(card->shortname, "OPTi %s", card->driver);
#if defined(CS4231) || defined(OPTi93X)
- snprintf(card->longname, sizeof(card->longname),
- "%s, %s at 0x%lx, irq %d, dma %d&%d",
- card->shortname, codec->pcm->name,
- chip->wss_base + 4, irq, dma1, xdma2);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s, %s at 0x%lx, irq %d, dma %d&%d",
+ card->shortname, codec->pcm->name,
+ chip->wss_base + 4, irq, dma1, xdma2);
#else
- snprintf(card->longname, sizeof(card->longname),
- "%s, %s at 0x%lx, irq %d, dma %d",
- card->shortname, codec->pcm->name, chip->wss_base + 4, irq,
- dma1);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s, %s at 0x%lx, irq %d, dma %d",
+ card->shortname, codec->pcm->name, chip->wss_base + 4, irq,
+ dma1);
#endif /* CS4231 || OPTi93X */
if (mpu_port <= 0 || mpu_port == SNDRV_AUTO_PORT)
char name[14];
int err;
- snprintf(name, sizeof(name), "sndscape.co%d", version);
+ scnprintf(name, sizeof(name), "sndscape.co%d", version);
err = request_firmware(&init_fw, name, card->dev);
if (err < 0) {
}
sprintf(card->shortname, "C-Media CMI%d", val);
if (cm->chip_version < 68)
- sprintf(modelstr, " (model %d)", cm->chip_version);
+ scnprintf(modelstr, sizeof(modelstr),
+ " (model %d)", cm->chip_version);
else
modelstr[0] = '\0';
- sprintf(card->longname, "%s%s at %#lx, irq %i",
- card->shortname, modelstr, cm->iobase, cm->irq);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s%s at %#lx, irq %i",
+ card->shortname, modelstr, cm->iobase, cm->irq);
if (cm->chip_version >= 39) {
val = snd_cmipci_read_b(cm, CM_REG_MPU_PCI + 1);
}
}
-static int __maybe_unused cs35l56_hda_runtime_suspend(struct device *dev)
+static int cs35l56_hda_runtime_suspend(struct device *dev)
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
return cs35l56_runtime_suspend_common(&cs35l56->base);
}
-static int __maybe_unused cs35l56_hda_runtime_resume(struct device *dev)
+static int cs35l56_hda_runtime_resume(struct device *dev)
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
int ret;
ucontrol->value.integer.value[0] = pos;
- return ret;
+ return 0;
}
static int cs35l56_hda_posture_put(struct snd_kcontrol *kcontrol,
sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
if (IS_ERR(sub)) {
- /* If no ACPI SUB, return 0 and fallback to legacy firmware path, otherwise fail */
- if (PTR_ERR(sub) == -ENODATA)
- return 0;
- else
- return PTR_ERR(sub);
+ dev_info(cs35l56->base.dev,
+ "Read ACPI _SUB failed(%ld): fallback to generic firmware\n",
+ PTR_ERR(sub));
+ } else {
+ cs35l56->system_name = sub;
}
- cs35l56->system_name = sub;
-
cs35l56->base.reset_gpio = devm_gpiod_get_index_optional(cs35l56->base.dev,
"reset",
cs35l56->index,
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+ pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
pm_runtime_get_sync(cs35l56->base.dev);
pm_runtime_disable(cs35l56->base.dev);
EXPORT_SYMBOL_NS_GPL(cs35l56_hda_remove, SND_HDA_SCODEC_CS35L56);
const struct dev_pm_ops cs35l56_hda_pm_ops = {
- SET_RUNTIME_PM_OPS(cs35l56_hda_runtime_suspend, cs35l56_hda_runtime_resume, NULL)
+ RUNTIME_PM_OPS(cs35l56_hda_runtime_suspend, cs35l56_hda_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(cs35l56_hda_system_suspend, cs35l56_hda_system_resume)
LATE_SYSTEM_SLEEP_PM_OPS(cs35l56_hda_system_suspend_late,
cs35l56_hda_system_resume_early)
return -ENOMEM;
cs35l56->base.dev = &clt->dev;
- cs35l56->base.can_hibernate = true;
cs35l56->base.regmap = devm_regmap_init_i2c(clt, &cs35l56_regmap_i2c);
if (IS_ERR(cs35l56->base.regmap)) {
ret = PTR_ERR(cs35l56->base.regmap);
const char *sfx, int cidx, unsigned long val)
{
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- snprintf(name, sizeof(name), "%s %s %s", pfx, dir, sfx);
+ int len;
+
+ len = snprintf(name, sizeof(name), "%s %s %s", pfx, dir, sfx);
+ if (snd_BUG_ON(len >= sizeof(name)))
+ return -EINVAL;
if (!add_control(spec, type, name, cidx, val))
return -ENOMEM;
return 0;
SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ SND_PCI_QUIRK(0x17aa, 0x316e, "Lenovo ThinkCentre M70q", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
spec->gen.preferred_dacs = preferred_pairs;
spec->gen.auto_mute_via_amp = 1;
- snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
- 0x0); /* Make sure 0x14 was disable */
+ if (spec->gen.autocfg.speaker_pins[0] != 0x14) {
+ snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ 0x0); /* Make sure 0x14 was disable */
+ }
}
SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
- SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
{0x17, 0x90170110},
{0x19, 0x03a11030},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK,
+ {0x17, 0x90170110}, /* 0x231f with RTK I2S AMP */
+ {0x19, 0x04a11040},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
{0x12, 0x90a60130},
{0x17, 0x90170110},
strcpy(card->driver, "RIPTIDE");
strcpy(card->shortname, "Riptide");
#ifdef SUPPORT_JOYSTICK
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x gameport 0x%x",
- card->shortname, chip->port, chip->irq, chip->mpuaddr,
- chip->opladdr, chip->gameaddr);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x gameport 0x%x",
+ card->shortname, chip->port, chip->irq, chip->mpuaddr,
+ chip->opladdr, chip->gameaddr);
#else
- snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x",
- card->shortname, chip->port, chip->irq, chip->mpuaddr,
- chip->opladdr);
+ scnprintf(card->longname, sizeof(card->longname),
+ "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x",
+ card->shortname, chip->port, chip->irq, chip->mpuaddr,
+ chip->opladdr);
#endif
snd_riptide_proc_init(chip);
err = snd_card_register(card);
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82TL"),
}
},
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UG"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
}
if ((aw_bin->all_bin_parse_num != 1) ||
(aw_bin->header_info[0].bin_data_type != DATA_TYPE_REGISTER)) {
dev_err(aw_dev->dev, "bin num or type error");
+ ret = -EINVAL;
goto parse_bin_failed;
}
if (aw_bin->header_info[0].valid_data_len % 4) {
dev_err(aw_dev->dev, "bin data len get error!");
+ ret = -EINVAL;
goto parse_bin_failed;
}
return -ENOMEM;
cs35l56->base.dev = dev;
- cs35l56->base.can_hibernate = true;
i2c_set_clientdata(client, cs35l56);
cs35l56->base.regmap = devm_regmap_init_i2c(client, regmap_config);
flush_workqueue(cs35l56->dsp_wq);
destroy_workqueue(cs35l56->dsp_wq);
+ pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
pm_runtime_suspend(cs35l56->base.dev);
pm_runtime_disable(cs35l56->base.dev);
switch (status) {
case SDW_SLAVE_ATTACHED:
dev_dbg(cs42l42->dev, "ATTACHED\n");
+
+ /*
+ * The SoundWire core can report stale ATTACH notifications
+ * if we hard-reset CS42L42 in probe() but it had already been
+ * enumerated. Reject the ATTACH if we haven't yet seen an
+ * UNATTACH report for the device being in reset.
+ */
+ if (cs42l42->sdw_waiting_first_unattach)
+ break;
+
/*
* Initialise codec, this only needs to be done once.
* When resuming from suspend, resume callback will handle re-init of codec,
break;
case SDW_SLAVE_UNATTACHED:
dev_dbg(cs42l42->dev, "UNATTACHED\n");
+
+ if (cs42l42->sdw_waiting_first_unattach) {
+ /*
+ * SoundWire core has seen that CS42L42 is not on
+ * the bus so release RESET and wait for ATTACH.
+ */
+ cs42l42->sdw_waiting_first_unattach = false;
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ }
+
break;
default:
break;
if (cs42l42->reset_gpio) {
dev_dbg(cs42l42->dev, "Found reset GPIO\n");
- gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+
+ /*
+ * ACPI can override the default GPIO state we requested
+ * so ensure that we start with RESET low.
+ */
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
+
+ /* Ensure minimum reset pulse width */
+ usleep_range(10, 500);
+
+ /*
+ * On SoundWire keep the chip in reset until we get an UNATTACH
+ * notification from the SoundWire core. This acts as a
+ * synchronization point to reject stale ATTACH notifications
+ * if the chip was already enumerated before we reset it.
+ */
+ if (cs42l42->sdw_peripheral)
+ cs42l42->sdw_waiting_first_unattach = true;
+ else
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
}
usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
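Taken together, the probe() and SoundWire status-callback hunks above form a small handshake; a condensed restatement of what the hunks show (SoundWire case only):

/*
 * probe():              hold the chip in reset, wait the minimum pulse
 *                       width, set sdw_waiting_first_unattach
 * SDW_SLAVE_UNATTACHED: clear sdw_waiting_first_unattach, release reset,
 *                       then wait for a fresh ATTACH
 * SDW_SLAVE_ATTACHED:   ignored while sdw_waiting_first_unattach is set,
 *                       so a stale pre-reset enumeration cannot start init
 */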
u8 stream_use;
bool hp_adc_up_pending;
bool suspended;
+ bool sdw_waiting_first_unattach;
bool init_done;
};
static int cs42l43_request_irq(struct cs42l43_codec *priv,
struct irq_domain *dom, const char * const name,
- unsigned int irq, irq_handler_t handler)
+ unsigned int irq, irq_handler_t handler,
+ unsigned long flags)
{
int ret;
dev_dbg(priv->dev, "Request IRQ %d for %s\n", ret, name);
- ret = devm_request_threaded_irq(priv->dev, ret, NULL, handler, IRQF_ONESHOT,
- name, priv);
+ ret = devm_request_threaded_irq(priv->dev, ret, NULL, handler,
+ IRQF_ONESHOT | flags, name, priv);
if (ret)
return dev_err_probe(priv->dev, ret, "Failed to request IRQ %s\n", name);
return 0;
}
- ret = cs42l43_request_irq(priv, dom, close_name, close_irq, handler);
+ ret = cs42l43_request_irq(priv, dom, close_name, close_irq, handler, IRQF_SHARED);
if (ret)
return ret;
- return cs42l43_request_irq(priv, dom, open_name, open_irq, handler);
+ return cs42l43_request_irq(priv, dom, open_name, open_irq, handler, IRQF_SHARED);
}
static int cs42l43_codec_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(cs42l43_irqs); i++) {
ret = cs42l43_request_irq(priv, dom, cs42l43_irqs[i].name,
- cs42l43_irqs[i].irq, cs42l43_irqs[i].handler);
+ cs42l43_irqs[i].irq,
+ cs42l43_irqs[i].handler, 0);
if (ret)
goto err_pm;
}
struct rt5640_priv *rt5640 = data;
int delay = 0;
- if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
- cancel_delayed_work_sync(&rt5640->jack_work);
+ if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
delay = 100;
- }
if (rt5640->jack)
- queue_delayed_work(system_long_wq, &rt5640->jack_work, delay);
+ mod_delayed_work(system_long_wq, &rt5640->jack_work, delay);
return IRQ_HANDLED;
}
if (jack_data && jack_data->use_platform_clock)
rt5640->use_platform_clock = jack_data->use_platform_clock;
- ret = devm_request_threaded_irq(component->dev, rt5640->irq,
- NULL, rt5640_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "rt5640", rt5640);
+ ret = request_irq(rt5640->irq, rt5640_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "rt5640", rt5640);
if (ret) {
dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
rt5640_disable_jack_detect(component);
rt5640->jack = jack;
- ret = devm_request_threaded_irq(component->dev, rt5640->irq,
- NULL, rt5640_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "rt5640", rt5640);
+ ret = request_irq(rt5640->irq, rt5640_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "rt5640", rt5640);
if (ret) {
dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
- rt5640->irq = -ENXIO;
+ rt5640->jack = NULL;
return;
}
+ rt5640->irq_requested = true;
/* sync initial jack state */
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
{
struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
- if (rt5640->irq) {
+ if (rt5640->jack) {
/* disable jack interrupts during system suspend */
disable_irq(rt5640->irq);
+ rt5640_cancel_work(rt5640);
}
- rt5640_cancel_work(rt5640);
snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
rt5640_reset(component);
regcache_cache_only(rt5640->regmap, true);
regcache_cache_only(rt5640->regmap, false);
regcache_sync(rt5640->regmap);
- if (rt5640->irq)
- enable_irq(rt5640->irq);
-
if (rt5640->jack) {
if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
snd_soc_component_update_bits(component,
}
}
+ enable_irq(rt5640->irq);
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
}
}
wm8960->regmap = devm_regmap_init_i2c(i2c, &wm8960_regmap);
- if (IS_ERR(wm8960->regmap))
- return PTR_ERR(wm8960->regmap);
+ if (IS_ERR(wm8960->regmap)) {
+ ret = PTR_ERR(wm8960->regmap);
+ goto bulk_disable;
+ }
if (pdata)
memcpy(&wm8960->pdata, pdata, sizeof(struct wm8960_data));
ret = i2c_master_recv(i2c, &val, sizeof(val));
if (ret >= 0) {
dev_err(&i2c->dev, "Not wm8960, wm8960 reg can not read by i2c\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto bulk_disable;
}
ret = wm8960_reset(wm8960->regmap);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to issue reset\n");
- return ret;
+ goto bulk_disable;
}
if (wm8960->pdata.shared_lrclk) {
if (ret != 0) {
dev_err(&i2c->dev, "Failed to enable LRCM: %d\n",
ret);
- return ret;
+ goto bulk_disable;
}
}
ret = devm_snd_soc_register_component(&i2c->dev,
&soc_component_dev_wm8960, &wm8960_dai, 1);
+ if (ret)
+ goto bulk_disable;
+ return 0;
+
+bulk_disable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8960->supplies), wm8960->supplies);
return ret;
}
struct wm_coeff_ctl *ctl;
int ret;
+ mutex_lock(&dsp->cs_dsp.pwr_lock);
ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
+ mutex_unlock(&dsp->cs_dsp.pwr_lock);
+
if (ret < 0)
return ret;
int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len)
{
- return cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg),
- 0, buf, len);
+ int ret;
+
+ mutex_lock(&dsp->cs_dsp.pwr_lock);
+ ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg),
+ 0, buf, len);
+ mutex_unlock(&dsp->cs_dsp.pwr_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
if (IS_ERR(priv->cpu_mclk)) {
ret = PTR_ERR(priv->cpu_mclk);
dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
- return -EINVAL;
+ return ret;
}
priv->audmix_pdev = audmix_pdev;
static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBC_CFC;
+ /*
+ * i.MX rpmsg sound cards work in codec slave mode. MCLK will be
+ * disabled by the CPU DAI driver in hw_free(). Some codecs require
+ * MCLK to be present during the power up/down sequence, so set
+ * ignore_pmdown_time to power down the codec before MCLK is turned off.
+ */
+ data->dai.ignore_pmdown_time = 1;
+
/* Optional codec node */
ret = of_parse_phandle_with_fixed_args(np, "audio-codec", 0, 0, &args);
if (ret) {
return -ENOMEM;
dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL);
+ if (!dl[i].codecs->name)
+ return -ENOMEM;
+
dl[i].codecs->dai_name = pcm->name;
dl[i].num_codecs = 1;
dl[i].num_cpus = 1;
return 0;
}
-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
- int ret;
-
- ret = clk_prepare_enable(priv->refclk);
- if (ret) {
- dev_err(dai->dev,
- "failed to enable spdifin reference clock\n");
- return ret;
- }
-
- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
- SPDIFIN_CTRL0_EN);
-
- return 0;
-}
-
-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
-
- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
- clk_disable_unprepare(priv->refclk);
-}
-
static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
unsigned int val,
unsigned int num_per_reg,
ret = axg_spdifin_sample_mode_config(dai, priv);
if (ret) {
dev_err(dai->dev, "mode configuration failed\n");
- clk_disable_unprepare(priv->pclk);
- return ret;
+ goto pclk_err;
}
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret) {
+ dev_err(dai->dev,
+ "failed to enable spdifin reference clock\n");
+ goto pclk_err;
+ }
+
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+ SPDIFIN_CTRL0_EN);
+
return 0;
+
+pclk_err:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
}
static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
{
struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+ clk_disable_unprepare(priv->refclk);
clk_disable_unprepare(priv->pclk);
return 0;
}
.probe = axg_spdifin_dai_probe,
.remove = axg_spdifin_dai_remove,
.prepare = axg_spdifin_prepare,
- .startup = axg_spdifin_startup,
- .shutdown = axg_spdifin_shutdown,
};
static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
if (i >= RSND_MAX_COMPONENT) {
dev_info(dev, "reach to max component\n");
of_node_put(node);
+ of_node_put(ports);
break;
}
}
{
struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
+ struct snd_pcm_hw_params tmp_params;
int i, ret = 0;
snd_soc_dpcm_mutex_assert_held(rtd);
goto out;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- struct snd_pcm_hw_params codec_params;
unsigned int tdm_mask = snd_soc_dai_tdm_mask_get(codec_dai, substream->stream);
/*
continue;
/* copy params for each codec */
- codec_params = *params;
+ tmp_params = *params;
/* fixup params based on TDM slot masks */
if (tdm_mask)
- soc_pcm_codec_params_fixup(&codec_params, tdm_mask);
+ soc_pcm_codec_params_fixup(&tmp_params, tdm_mask);
ret = snd_soc_dai_hw_params(codec_dai, substream,
- &codec_params);
+ &tmp_params);
if (ret < 0)
goto out;
- soc_pcm_set_dai_params(codec_dai, &codec_params);
- snd_soc_dapm_update_dai(substream, &codec_params, codec_dai);
+ soc_pcm_set_dai_params(codec_dai, &tmp_params);
+ snd_soc_dapm_update_dai(substream, &tmp_params, codec_dai);
}
for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- struct snd_pcm_hw_params cpu_params;
unsigned int ch_mask = 0;
int j;
continue;
/* copy params for each cpu */
- cpu_params = *params;
+ tmp_params = *params;
if (!rtd->dai_link->codec_ch_maps)
goto hw_params;
/* fixup cpu channel number */
if (ch_mask)
- soc_pcm_codec_params_fixup(&cpu_params, ch_mask);
+ soc_pcm_codec_params_fixup(&tmp_params, ch_mask);
hw_params:
- ret = snd_soc_dai_hw_params(cpu_dai, substream, &cpu_params);
+ ret = snd_soc_dai_hw_params(cpu_dai, substream, &tmp_params);
if (ret < 0)
goto out;
/* store the parameters for each DAI */
- soc_pcm_set_dai_params(cpu_dai, &cpu_params);
- snd_soc_dapm_update_dai(substream, &cpu_params, cpu_dai);
+ soc_pcm_set_dai_params(cpu_dai, &tmp_params);
+ snd_soc_dapm_update_dai(substream, &tmp_params, cpu_dai);
}
ret = snd_soc_pcm_component_hw_params(substream, params);
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy);
int snd_soc_component_is_dummy(struct snd_soc_component *component)
{
snd_sof_ipc_free(sdev);
snd_sof_free_debug(sdev);
snd_sof_remove(sdev);
+ sof_ops_free(sdev);
}
- sof_ops_free(sdev);
-
/* release firmware */
snd_sof_fw_unload(sdev);
/* step 3: wait for IPC DONE bit from ROM */
ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
- HDA_DSP_REG_POLL_INTERVAL_US, MTL_DSP_PURGE_TIMEOUT_US);
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_INIT_TIMEOUT_US);
if (ret < 0) {
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
#define MTL_DSP_IRQSTS_IPC BIT(0)
#define MTL_DSP_IRQSTS_SDW BIT(6)
-#define MTL_DSP_PURGE_TIMEOUT_US 20000000 /* 20s */
#define MTL_DSP_REG_POLL_INTERVAL_US 10 /* 10 us */
/* Memory windows */
ret = sof_update_ipc_object(scomp, available_fmt,
SOF_AUDIO_FMT_NUM_TOKENS, swidget->tuples,
- swidget->num_tuples, sizeof(available_fmt), 1);
+ swidget->num_tuples, sizeof(*available_fmt), 1);
if (ret) {
dev_err(scomp->dev, "Failed to parse audio format token count\n");
return ret;
sof_widget_free_unlocked(sdev, swidget);
use_count_decremented = true;
core_put:
- snd_sof_dsp_core_put(sdev, swidget->core);
+ if (!use_count_decremented)
+ snd_sof_dsp_core_put(sdev, swidget->core);
pipe_widget_free:
if (swidget->id != snd_soc_dapm_scheduler)
sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
#include <linux/platform_device.h>
#include <sound/graph_card.h>
#include <sound/pcm_params.h>
+#include <sound/soc-dai.h>
#define MAX_PLLA_OUT0_DIV 128
unsigned int plla_out0_rates[NUM_RATE_TYPE];
};
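+/*
+ * hw_params is called for each DAI link (FE or BE) when DPCM is used, but
+ * the PLLA rate only needs updating for the external I/O links (I2S, DMIC
+ * or DSPK) that actually derive their clocks from it, so skip dummy DAIs
+ * and everything else.
+ */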
+static bool need_clk_update(struct snd_soc_dai *dai)
+{
+ if (snd_soc_dai_is_dummy(dai) ||
+ !dai->driver->ops ||
+ !dai->driver->name)
+ return false;
+
+ if (strstr(dai->driver->name, "I2S") ||
+ strstr(dai->driver->name, "DMIC") ||
+ strstr(dai->driver->name, "DSPK"))
+ return true;
+
+ return false;
+}
+
/* Setup PLL clock as per the given sample rate */
static int tegra_audio_graph_update_pll(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int err;
- /*
- * This gets called for each DAI link (FE or BE) when DPCM is used.
- * We may not want to update PLLA rate for each call. So PLLA update
- * must be restricted to external I/O links (I2S, DMIC or DSPK) since
- * they actually depend on it. I/O modules update their clocks in
- * hw_param() of their respective component driver and PLLA rate
- * update here helps them to derive appropriate rates.
- *
- * TODO: When more HW accelerators get added (like sample rate
- * converter, volume gain controller etc., which don't really
- * depend on PLLA) we need a better way to filter here.
- */
- if (cpu_dai->driver->ops && rtd->dai_link->no_pcm) {
+ if (need_clk_update(cpu_dai)) {
err = tegra_audio_graph_update_pll(substream, params);
if (err)
return err;
}
usb_make_path(usb_dev, usbpath, sizeof(usbpath));
- snprintf(card->longname, sizeof(card->longname), "%s %s (%s)",
+ scnprintf(card->longname, sizeof(card->longname), "%s %s (%s)",
cdev->vendor_name, cdev->product_name, usbpath);
setup_card(cdev);
struct uac_clock_source_descriptor *hdr = _ftr;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
- char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
int ret;
if (state->mixer->protocol != UAC_VERSION_2)
kctl->private_free = snd_usb_mixer_elem_free;
ret = snd_usb_copy_string_desc(state->chip, hdr->iClockSource,
- name, sizeof(name));
+ kctl->id.name, sizeof(kctl->id.name));
if (ret > 0)
- snprintf(kctl->id.name, sizeof(kctl->id.name),
- "%s Validity", name);
+ append_ctl_name(kctl, " Validity");
else
snprintf(kctl->id.name, sizeof(kctl->id.name),
"Clock Source %d Validity", hdr->bClockID);
/* Add input phantom controls */
if (info->inputs_per_phantom == 1) {
for (i = 0; i < info->phantom_count; i++) {
- snprintf(s, sizeof(s), fmt, i + 1,
- "Phantom Power", "Switch");
+ scnprintf(s, sizeof(s), fmt, i + 1,
+ "Phantom Power", "Switch");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_phantom_ctl,
i, 1, s, &private->phantom_ctls[i]);
int from = i * info->inputs_per_phantom + 1;
int to = (i + 1) * info->inputs_per_phantom;
- snprintf(s, sizeof(s), fmt2, from, to,
- "Phantom Power", "Switch");
+ scnprintf(s, sizeof(s), fmt2, from, to,
+ "Phantom Power", "Switch");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_phantom_ctl,
i, 1, s, &private->phantom_ctls[i]);
*stream_cnt = 0;
num_devices = 0;
do {
- snprintf(node, sizeof(node), "%d", num_devices);
+ scnprintf(node, sizeof(node), "%d", num_devices);
if (!xenbus_exists(XBT_NIL, xb_dev->nodename, node))
break;
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
*/
+#define ARCH_CAP_GDS_CTRL BIT(25) /*
+ * CPU is vulnerable to Gather
+ * Data Sampling (GDS) and
+ * has controls for mitigation.
+ */
+#define ARCH_CAP_GDS_NO BIT(26) /*
+ * CPU is not vulnerable to Gather
+ * Data Sampling (GDS).
+ */
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR
#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
#define RTM_ALLOW BIT(1) /* TSX development mode */
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
+#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */
+#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
____BTF_ID(symbol)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
return __va(address);
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
static inline void totalram_pages_inc(void)
{
#ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
#define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+struct seq_file;
+
#endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+
#undef __NR_syscalls
-#define __NR_syscalls 452
+#define __NR_syscalls 453
/*
* 32 bit systems traditionally used different
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
/**
* DRM_CAP_SYNCOBJ
*
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
__u32 pad;
};
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
+};
+
struct drm_syncobj_array {
__u64 handles;
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
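As a hedged illustration of the new ioctl (the helper name, header paths and error handling are assumptions, not part of the patch), a userspace caller might register an eventfd against a binary syncobj roughly as follows; once the syncobj signals, the eventfd counter is incremented by one and the fd can be polled like any other eventfd:

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int watch_syncobj(int drm_fd, __u32 syncobj_handle)
{
	struct drm_syncobj_eventfd args = {
		.handle = syncobj_handle,
		.flags  = 0,                      /* wait for the point to be signalled */
		.point  = 0,                      /* binary syncobj */
		.fd     = eventfd(0, EFD_CLOEXEC),
	};

	if (args.fd < 0)
		return -1;

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);
}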
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
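As a hedged sketch of the event-stream semantics documented above (the helper name and buffer size are assumptions), a userspace reader might drain pending events by stepping through the buffer with each event's length field, relying on read() only ever returning complete events:

#include <unistd.h>
#include <sys/types.h>
#include <drm/drm.h>

static void drain_drm_events(int drm_fd)
{
	char buf[1024];
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (off + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event *ev = (struct drm_event *)&buf[off];

		if (ev->type == DRM_EVENT_FLIP_COMPLETE) {
			/* page flip finished: safe to queue the next frame */
		}

		off += ev->length;
	}
}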
struct drm_event_vblank {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_SECCOMP_H
+#define _UAPI_LINUX_SECCOMP_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+
+/* Valid values for seccomp.mode and prctl(PR_SET_SECCOMP, <mode>) */
+#define SECCOMP_MODE_DISABLED 0 /* seccomp is not in use. */
+#define SECCOMP_MODE_STRICT 1 /* uses hard-coded filter. */
+#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
+
+/* Valid operations for seccomp syscall. */
+#define SECCOMP_SET_MODE_STRICT 0
+#define SECCOMP_SET_MODE_FILTER 1
+#define SECCOMP_GET_ACTION_AVAIL 2
+#define SECCOMP_GET_NOTIF_SIZES 3
+
+/* Valid flags for SECCOMP_SET_MODE_FILTER */
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
+#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
+/* Received notifications wait in killable state (only respond to fatal signals) */
+#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
+
+/*
+ * All BPF programs must return a 32-bit value.
+ * The bottom 16-bits are for optional return data.
+ * The upper 16-bits are ordered from least permissive values to most,
+ * as a signed value (so 0x80000000 is negative).
+ *
+ * The ordering ensures that a min_t() over composed return values always
+ * selects the least permissive choice.
+ */
+#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
+#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
+#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
+#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
+#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
+#define SECCOMP_RET_USER_NOTIF 0x7fc00000U /* notifies userspace */
+#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
+#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
+#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
+
+/* Masks for the return value sections. */
+#define SECCOMP_RET_ACTION_FULL 0xffff0000U
+#define SECCOMP_RET_ACTION 0x7fff0000U
+#define SECCOMP_RET_DATA 0x0000ffffU
+
+/**
+ * struct seccomp_data - the format the BPF program executes over.
+ * @nr: the system call number
+ * @arch: indicates system call convention as an AUDIT_ARCH_* value
+ * as defined in <linux/audit.h>.
+ * @instruction_pointer: at the time of the system call.
+ * @args: up to 6 system call arguments always stored as 64-bit values
+ * regardless of the architecture.
+ */
+struct seccomp_data {
+ int nr;
+ __u32 arch;
+ __u64 instruction_pointer;
+ __u64 args[6];
+};
+
+struct seccomp_notif_sizes {
+ __u16 seccomp_notif;
+ __u16 seccomp_notif_resp;
+ __u16 seccomp_data;
+};
+
+struct seccomp_notif {
+ __u64 id;
+ __u32 pid;
+ __u32 flags;
+ struct seccomp_data data;
+};
+
+/*
+ * Valid flags for struct seccomp_notif_resp
+ *
+ * Note, the SECCOMP_USER_NOTIF_FLAG_CONTINUE flag must be used with caution!
+ * If set by the process supervising the syscalls of another process the
+ * syscall will continue. This is problematic because of an inherent TOCTOU.
+ * An attacker can exploit the time while the supervised process is waiting on
+ * a response from the supervising process to rewrite syscall arguments which
+ * are passed as pointers of the intercepted syscall.
+ * It should be absolutely clear that this means that the seccomp notifier
+ * _cannot_ be used to implement a security policy! It should only ever be used
+ * in scenarios where a more privileged process supervises the syscalls of a
+ * lesser privileged process to get around kernel-enforced security
+ * restrictions when the privileged process deems this safe. In other words,
+ * in order to continue a syscall the supervising process should be sure that
+ * another security mechanism or the kernel itself will sufficiently block
+ * syscalls if arguments are rewritten to something unsafe.
+ *
+ * Similar precautions should be applied when stacking SECCOMP_RET_USER_NOTIF
+ * or SECCOMP_RET_TRACE. For SECCOMP_RET_USER_NOTIF filters acting on the
+ * same syscall, the most recently added filter takes precedence. This means
+ * that the new SECCOMP_RET_USER_NOTIF filter can override any
+ * SECCOMP_IOCTL_NOTIF_SEND from earlier filters, essentially allowing all
+ * such filtered syscalls to be executed by sending the response
+ * SECCOMP_USER_NOTIF_FLAG_CONTINUE. Note that SECCOMP_RET_TRACE can equally
+ * be overridden by SECCOMP_USER_NOTIF_FLAG_CONTINUE.
+ */
+#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0)
+
+struct seccomp_notif_resp {
+ __u64 id;
+ __s64 val;
+ __s32 error;
+ __u32 flags;
+};
+
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+
+/* valid flags for seccomp_notif_addfd */
+#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
+
+/**
+ * struct seccomp_notif_addfd
+ * @id: The ID of the seccomp notification
+ * @flags: SECCOMP_ADDFD_FLAG_*
+ * @srcfd: The local fd number
+ * @newfd: Optional remote FD number if SETFD option is set, otherwise 0.
+ * @newfd_flags: The O_* flags the remote FD should have applied
+ */
+struct seccomp_notif_addfd {
+ __u64 id;
+ __u32 flags;
+ __u32 srcfd;
+ __u32 newfd;
+ __u32 newfd_flags;
+};
+
+#define SECCOMP_IOC_MAGIC '!'
+#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
+#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
+
+/* Flags for seccomp notification fd ioctl. */
+#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
+#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
+ struct seccomp_notif_resp)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
+/* On success, the return value is the remote process's added fd number */
+#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
+ struct seccomp_notif_addfd)
+
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+
+#endif /* _UAPI_LINUX_SECCOMP_H */
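A minimal, hedged sketch of the supervisor side of SECCOMP_RET_USER_NOTIF, assuming notify_fd was obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER; a real supervisor should size its buffers from SECCOMP_GET_NOTIF_SIZES, and the deny-everything policy and the function name are placeholders:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

static int supervise(int notify_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	for (;;) {
		memset(&req, 0, sizeof(req));	/* NOTIF_RECV requires a zeroed buffer */
		if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
			return -1;

		/* Skip stale notifications: the target may already have been
		 * signalled or killed while we were deciding.
		 */
		if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id) < 0)
			continue;

		memset(&resp, 0, sizeof(resp));
		resp.id = req.id;
		resp.error = -EPERM;	/* placeholder policy: deny everything */

		if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp) < 0 &&
		    errno != ENOENT)
			return -1;
	}
}

Responding with resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE instead would let the intercepted syscall run, subject to the TOCTOU caveats spelled out in the header comment above.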
continue;
}
- if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
+ if (insn_func(dest) && insn_func(insn) &&
+ insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
/*
* Anything from->to self is either _THIS_IP_ or
* IRET-to-self.
449 n64 futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 n64 cachestat sys_cachestat
+452 n64 fchmodat2 sys_fchmodat2
449 common futex_waitv sys_futex_waitv
450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
449 common futex_waitv sys_futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 64 map_shadow_stack sys_map_shadow_stack
#
# Due to a historical design error, certain syscalls are numbered differently
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/time64.h>
-#include <linux/seccomp.h>
+#include <uapi/linux/seccomp.h>
#include <sys/prctl.h>
#include <unistd.h>
"include/uapi/linux/perf_event.h"
"include/uapi/linux/prctl.h"
"include/uapi/linux/sched.h"
+ "include/uapi/linux/seccomp.h"
"include/uapi/linux/stat.h"
"include/uapi/linux/usbdevice_fs.h"
"include/uapi/linux/vhost.h"
}
}
free(cpuid);
- if (!pmu)
+ if (!pmu || !table)
return table;
for (i = 0; i < table->num_pmus; i++) {
# pylint: disable=invalid-name
return Function('has_event', event)
-def strcmp_cpuid_str(event: str) -> Function:
+def strcmp_cpuid_str(cpuid: Event) -> Function:
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
- return Function('strcmp_cpuid_str', event)
+ return Function('strcmp_cpuid_str', cpuid)
class Metric:
"""An individual metric that will specifiable on the perf command line."""
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-prologue.c
- *
- * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
- * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015 Huawei Inc.
- */
-
-#include <bpf/libbpf.h>
-#include "debug.h"
-#include "bpf-loader.h"
-#include "bpf-prologue.h"
-#include "probe-finder.h"
-#include <errno.h>
-#include <stdlib.h>
-#include <dwarf-regs.h>
-#include <linux/filter.h>
-
-#define BPF_REG_SIZE 8
-
-#define JMP_TO_ERROR_CODE -1
-#define JMP_TO_SUCCESS_CODE -2
-#define JMP_TO_USER_CODE -3
-
-struct bpf_insn_pos {
- struct bpf_insn *begin;
- struct bpf_insn *end;
- struct bpf_insn *pos;
-};
-
-static inline int
-pos_get_cnt(struct bpf_insn_pos *pos)
-{
- return pos->pos - pos->begin;
-}
-
-static int
-append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
-{
- if (!pos->pos)
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
-
- if (pos->pos + 1 >= pos->end) {
- pr_err("bpf prologue: prologue too long\n");
- pos->pos = NULL;
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
- }
-
- *(pos->pos)++ = new_insn;
- return 0;
-}
-
-static int
-check_pos(struct bpf_insn_pos *pos)
-{
- if (!pos->pos || pos->pos >= pos->end)
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
- return 0;
-}
-
-/*
- * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
- * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM
- * instruction (BPF_{B,H,W,DW}).
- */
-static int
-argtype_to_ldx_size(const char *type)
-{
- int arg_size = type ? atoi(&type[1]) : 64;
-
- switch (arg_size) {
- case 8:
- return BPF_B;
- case 16:
- return BPF_H;
- case 32:
- return BPF_W;
- case 64:
- default:
- return BPF_DW;
- }
-}
-
-static const char *
-insn_sz_to_str(int insn_sz)
-{
- switch (insn_sz) {
- case BPF_B:
- return "BPF_B";
- case BPF_H:
- return "BPF_H";
- case BPF_W:
- return "BPF_W";
- case BPF_DW:
- return "BPF_DW";
- default:
- return "UNKNOWN";
- }
-}
-
-/* Give it a shorter name */
-#define ins(i, p) append_insn((i), (p))
-
-/*
- * Give a register name (in 'reg'), generate instruction to
- * load register into an eBPF register rd:
- * 'ldd target_reg, offset(ctx_reg)', where:
- * ctx_reg is pre initialized to pointer of 'struct pt_regs'.
- */
-static int
-gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
- const char *reg, int target_reg)
-{
- int offset = regs_query_register_offset(reg);
-
- if (offset < 0) {
- pr_err("bpf: prologue: failed to get register %s\n",
- reg);
- return offset;
- }
- ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
-
- return check_pos(pos);
-}
-
-/*
- * Generate a BPF_FUNC_probe_read function call.
- *
- * src_base_addr_reg is a register holding base address,
- * dst_addr_reg is a register holding dest address (on stack),
- * result is:
- *
- * *[dst_addr_reg] = *([src_base_addr_reg] + offset)
- *
- * Arguments of BPF_FUNC_probe_read:
- * ARG1: ptr to stack (dest)
- * ARG2: size (8)
- * ARG3: unsafe ptr (src)
- */
-static int
-gen_read_mem(struct bpf_insn_pos *pos,
- int src_base_addr_reg,
- int dst_addr_reg,
- long offset,
- int probeid)
-{
- /* mov arg3, src_base_addr_reg */
- if (src_base_addr_reg != BPF_REG_ARG3)
- ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
- /* add arg3, #offset */
- if (offset)
- ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);
-
- /* mov arg2, #reg_size */
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);
-
- /* mov arg1, dst_addr_reg */
- if (dst_addr_reg != BPF_REG_ARG1)
- ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
-
- /* Call probe_read */
- ins(BPF_EMIT_CALL(probeid), pos);
- /*
- * Error processing: if read fail, goto error code,
- * will be relocated. Target should be the start of
- * error processing code.
- */
- ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
- pos);
-
- return check_pos(pos);
-}
-
-/*
- * Each arg should be bare register. Fetch and save them into argument
- * registers (r3 - r5).
- *
- * BPF_REG_1 should have been initialized with pointer to
- * 'struct pt_regs'.
- */
-static int
-gen_prologue_fastpath(struct bpf_insn_pos *pos,
- struct probe_trace_arg *args, int nargs)
-{
- int i, err = 0;
-
- for (i = 0; i < nargs; i++) {
- err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
- BPF_PROLOGUE_START_ARG_REG + i);
- if (err)
- goto errout;
- }
-
- return check_pos(pos);
-errout:
- return err;
-}
-
-/*
- * Slow path:
- * At least one argument has the form of 'offset($rx)'.
- *
- * Following code first stores them into stack, then loads all of then
- * to r2 - r5.
- * Before final loading, the final result should be:
- *
- * low address
- * BPF_REG_FP - 24 ARG3
- * BPF_REG_FP - 16 ARG2
- * BPF_REG_FP - 8 ARG1
- * BPF_REG_FP
- * high address
- *
- * For each argument (described as: offn(...off2(off1(reg)))),
- * generates following code:
- *
- * r7 <- fp
- * r7 <- r7 - stack_offset // Ideal code should initialize r7 using
- * // fp before generating args. However,
- * // eBPF won't regard r7 as stack pointer
- * // if it is generated by minus 8 from
- * // another stack pointer except fp.
- * // This is why we have to set r7
- * // to fp for each variable.
- * r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
- * (r7) <- r3 // skip following instructions for bare reg
- * r3 <- r3 + off1 . // skip if off1 == 0
- * r2 <- 8 \
- * r1 <- r7 |-> generated by gen_read_mem()
- * call probe_read /
- * jnei r0, 0, err ./
- * r3 <- (r7)
- * r3 <- r3 + off2 . // skip if off2 == 0
- * r2 <- 8 \ // r2 may be broken by probe_read, so set again
- * r1 <- r7 |-> generated by gen_read_mem()
- * call probe_read /
- * jnei r0, 0, err ./
- * ...
- */
-static int
-gen_prologue_slowpath(struct bpf_insn_pos *pos,
- struct probe_trace_arg *args, int nargs)
-{
- int err, i, probeid;
-
- for (i = 0; i < nargs; i++) {
- struct probe_trace_arg *arg = &args[i];
- const char *reg = arg->value;
- struct probe_trace_arg_ref *ref = NULL;
- int stack_offset = (i + 1) * -8;
-
- pr_debug("prologue: fetch arg %d, base reg is %s\n",
- i, reg);
-
- /* value of base register is stored into ARG3 */
- err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
- BPF_REG_ARG3);
- if (err) {
- pr_err("prologue: failed to get offset of register %s\n",
- reg);
- goto errout;
- }
-
- /* Make r7 the stack pointer. */
- ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
- /* r7 += -8 */
- ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
- /*
- * Store r3 (base register) onto stack
- * Ensure fp[offset] is set.
- * fp is the only valid base register when storing
- * into stack. We are not allowed to use r7 as base
- * register here.
- */
- ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
- stack_offset), pos);
-
- ref = arg->ref;
- probeid = BPF_FUNC_probe_read_kernel;
- while (ref) {
- pr_debug("prologue: arg %d: offset %ld\n",
- i, ref->offset);
-
- if (ref->user_access)
- probeid = BPF_FUNC_probe_read_user;
-
- err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
- ref->offset, probeid);
- if (err) {
- pr_err("prologue: failed to generate probe_read function call\n");
- goto errout;
- }
-
- ref = ref->next;
- /*
- * Load previous result into ARG3. Use
- * BPF_REG_FP instead of r7 because verifier
- * allows FP based addressing only.
- */
- if (ref)
- ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
- BPF_REG_FP, stack_offset), pos);
- }
- }
-
- /* Final pass: read to registers */
- for (i = 0; i < nargs; i++) {
- int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;
-
- pr_debug("prologue: load arg %d, insn_sz is %s\n",
- i, insn_sz_to_str(insn_sz));
- ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
- BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
- }
-
- ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);
-
- return check_pos(pos);
-errout:
- return err;
-}
-
-static int
-prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
- struct bpf_insn *success_code, struct bpf_insn *user_code)
-{
- struct bpf_insn *insn;
-
- if (check_pos(pos))
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
-
- for (insn = pos->begin; insn < pos->pos; insn++) {
- struct bpf_insn *target;
- u8 class = BPF_CLASS(insn->code);
- u8 opcode;
-
- if (class != BPF_JMP)
- continue;
- opcode = BPF_OP(insn->code);
- if (opcode == BPF_CALL)
- continue;
-
- switch (insn->off) {
- case JMP_TO_ERROR_CODE:
- target = error_code;
- break;
- case JMP_TO_SUCCESS_CODE:
- target = success_code;
- break;
- case JMP_TO_USER_CODE:
- target = user_code;
- break;
- default:
- pr_err("bpf prologue: internal error: relocation failed\n");
- return -BPF_LOADER_ERRNO__PROLOGUE;
- }
-
- insn->off = target - (insn + 1);
- }
- return 0;
-}
-
-int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
- struct bpf_insn *new_prog, size_t *new_cnt,
- size_t cnt_space)
-{
- struct bpf_insn *success_code = NULL;
- struct bpf_insn *error_code = NULL;
- struct bpf_insn *user_code = NULL;
- struct bpf_insn_pos pos;
- bool fastpath = true;
- int err = 0, i;
-
- if (!new_prog || !new_cnt)
- return -EINVAL;
-
- if (cnt_space > BPF_MAXINSNS)
- cnt_space = BPF_MAXINSNS;
-
- pos.begin = new_prog;
- pos.end = new_prog + cnt_space;
- pos.pos = new_prog;
-
- if (!nargs) {
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
- &pos);
-
- if (check_pos(&pos))
- goto errout;
-
- *new_cnt = pos_get_cnt(&pos);
- return 0;
- }
-
- if (nargs > BPF_PROLOGUE_MAX_ARGS) {
- pr_warning("bpf: prologue: %d arguments are dropped\n",
- nargs - BPF_PROLOGUE_MAX_ARGS);
- nargs = BPF_PROLOGUE_MAX_ARGS;
- }
-
- /* First pass: validation */
- for (i = 0; i < nargs; i++) {
- struct probe_trace_arg_ref *ref = args[i].ref;
-
- if (args[i].value[0] == '@') {
- /* TODO: fetch global variable */
- pr_err("bpf: prologue: global %s%+ld not support\n",
- args[i].value, ref ? ref->offset : 0);
- return -ENOTSUP;
- }
-
- while (ref) {
- /* fastpath is true if all args has ref == NULL */
- fastpath = false;
-
- /*
- * Instruction encodes immediate value using
- * s32, ref->offset is long. On systems which
- * can't fill long in s32, refuse to process if
- * ref->offset too large (or small).
- */
-#ifdef __LP64__
-#define OFFSET_MAX ((1LL << 31) - 1)
-#define OFFSET_MIN ((1LL << 31) * -1)
- if (ref->offset > OFFSET_MAX ||
- ref->offset < OFFSET_MIN) {
- pr_err("bpf: prologue: offset out of bound: %ld\n",
- ref->offset);
- return -BPF_LOADER_ERRNO__PROLOGUEOOB;
- }
-#endif
- ref = ref->next;
- }
- }
- pr_debug("prologue: pass validation\n");
-
- if (fastpath) {
- /* If all variables are registers... */
- pr_debug("prologue: fast path\n");
- err = gen_prologue_fastpath(&pos, args, nargs);
- if (err)
- goto errout;
- } else {
- pr_debug("prologue: slow path\n");
-
- /* Initialization: move ctx to a callee saved register. */
- ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
-
- err = gen_prologue_slowpath(&pos, args, nargs);
- if (err)
- goto errout;
- /*
- * start of ERROR_CODE (only slow pass needs error code)
- * mov r2 <- 1 // r2 is error number
- * mov r3 <- 0 // r3, r4... should be touched or
- * // verifier would complain
- * mov r4 <- 0
- * ...
- * goto usercode
- */
- error_code = pos.pos;
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
- &pos);
-
- for (i = 0; i < nargs; i++)
- ins(BPF_ALU64_IMM(BPF_MOV,
- BPF_PROLOGUE_START_ARG_REG + i,
- 0),
- &pos);
- ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
- &pos);
- }
-
- /*
- * start of SUCCESS_CODE:
- * mov r2 <- 0
- * goto usercode // skip
- */
- success_code = pos.pos;
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
-
- /*
- * start of USER_CODE:
- * Restore ctx to r1
- */
- user_code = pos.pos;
- if (!fastpath) {
- /*
- * Only slow path needs restoring of ctx. In fast path,
- * register are loaded directly from r1.
- */
- ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
- err = prologue_relocate(&pos, error_code, success_code,
- user_code);
- if (err)
- goto errout;
- }
-
- err = check_pos(&pos);
- if (err)
- goto errout;
-
- *new_cnt = pos_get_cnt(&pos);
- return 0;
-errout:
- return err;
-}
#define MAX_CPUS 4096
// FIXME: These should come from system headers
+#ifndef bool
typedef char bool;
+#endif
typedef int pid_t;
typedef long long int __s64;
typedef __s64 time64_t;
size_t sz;
};
-#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
- .hash_fn = (hash_fn), \
- .equal_fn = (equal_fn), \
- .ctx = (ctx), \
- .buckets = NULL, \
- .cap = 0, \
- .cap_bits = 0, \
- .sz = 0, \
-}
-
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
hashmap_equal_fn equal_fn, void *ctx);
struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
pmu_name = pe->pmu;
}
- alias = malloc(sizeof(*alias));
+ alias = zalloc(sizeof(*alias));
if (!alias)
return -ENOMEM;
{
}
+static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+}
+
#endif
return NULL;
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
+void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
{
}
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "basic_api.h"
#include <string.h>
#include <linux/memblock.h>
-#include "basic_api.h"
#define EXPECTED_MEMBLOCK_REGIONS 128
#define FUNC_ADD "memblock_add"
#include <stdlib.h>
#include <assert.h>
#include <linux/types.h>
+#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/printk.h>
int conf_get_bool(snd_config_t *root, const char *key1, const char *key2, int def)
{
snd_config_t *cfg;
- long l;
int ret;
if (!root)
{
unsigned short revents;
snd_ctl_event_t *event;
- int count, err;
+ int err;
unsigned int mask = 0;
unsigned int ev_id;
static void test_ctl_name(struct ctl_data *ctl)
{
bool name_ok = true;
- bool check;
ksft_print_msg("%d.%d %s\n", ctl->card->card, ctl->elem,
ctl->name);
snd_ctl_elem_value_t *val)
{
int err;
- long val_read;
/* Ideally this will fail... */
err = snd_ctl_elem_write(ctl->card->handle, val);
static bool test_ctl_write_invalid_boolean(struct ctl_data *ctl)
{
- int err, i;
- long val_read;
+ int i;
bool fail = false;
snd_ctl_elem_value_t *val;
snd_ctl_elem_value_alloca(&val);
static bool test_ctl_write_invalid_enumerated(struct ctl_data *ctl)
{
- int err, i;
- unsigned int val_read;
+ int i;
bool fail = false;
snd_ctl_elem_value_t *val;
snd_ctl_elem_value_alloca(&val);
static void test_ctl_write_invalid(struct ctl_data *ctl)
{
bool pass;
- int err;
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
static void test_pcm_time(struct pcm_data *data, enum test_class class,
const char *test_name, snd_config_t *pcm_cfg)
{
- char name[64], key[128], msg[256];
+ char name[64], msg[256];
const int duration_s = 2, margin_ms = 100;
const int duration_ms = duration_s * 1000;
const char *cs;
{
struct card_data *card;
struct pcm_data *pcm;
- snd_config_t *global_config, *cfg, *pcm_cfg;
+ snd_config_t *global_config, *cfg;
int num_pcm_tests = 0, num_tests, num_std_pcm_tests;
int ret;
void *thread_ret;
*/
TEST_F(pcmtest, reset_ioctl) {
snd_pcm_t *handle;
- unsigned char *it;
int test_res;
struct pcmtest_test_params *params = &self->params;
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
fexit_sleep # The test never returns. The remaining tests cannot start.
-kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
-kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+kprobe_multi_bench_attach # needs CONFIG_FPROBE
+kprobe_multi_test # needs CONFIG_FPROBE
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
CONFIG_BPF=y
CONFIG_BPF_EVENTS=y
CONFIG_BPF_JIT=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_BPF_LIRC_MODE2=y
CONFIG_BPF_LSM=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_BONDING=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_BPF_PRELOAD=y
CONFIG_BPF_PRELOAD_UMD=y
CONFIG_BPFILTER=y
int *ifindex;
int err;
int ret;
+ int lwt_egress_ret; /* expected retval at lwt/egress */
bool success_on_tc;
} tests[] = {
/* Empty packets are always rejected. */
.data_size_in = sizeof(eth_hlen),
.ifindex = &veth_ifindex,
.ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
.success_on_tc = true,
},
{
.data_size_in = sizeof(eth_hlen),
.ifindex = &ipip_ifindex,
.ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
},
/* ETH_HLEN+1-sized packet should be redirected. */
.data_in = eth_hlen_pp,
.data_size_in = sizeof(eth_hlen_pp),
.ifindex = &veth_ifindex,
+ .lwt_egress_ret = 1, /* veth_xmit NET_XMIT_DROP */
},
{
.msg = "ipip ETH_HLEN+1 packet ingress",
for (i = 0; i < ARRAY_SIZE(tests); i++) {
bpf_object__for_each_program(prog, bpf_obj->obj) {
- char buf[128];
+ bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL;
bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
+ int expected_ret;
+ char buf[128];
+
+ expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
tattr.data_in = tests[i].data_in;
tattr.data_size_in = tests[i].data_size_in;
if (at_tc && tests[i].success_on_tc)
ASSERT_GE(bpf_obj->bss->ret, 0, buf);
else
- ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf);
+ ASSERT_EQ(bpf_obj->bss->ret, expected_ret, buf);
}
}
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
#include "kprobe_multi_empty.skel.h"
+#include "kprobe_multi_override.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
}
}
+static void test_attach_override(void)
+{
+ struct kprobe_multi_override *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_multi_override__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_override__open_and_load"))
+ goto cleanup;
+
+ /* test_override calls bpf_override_return, so it should fail to
+ * attach to the bpf_fentry_test1 function, which is not on the
+ * error injection list.
+ */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ /* The should_fail_bio function is on the error injection list,
+ * so the attach should succeed.
+ */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "should_fail_bio", NULL);
+ if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio"))
+ goto cleanup;
+
+ bpf_link__destroy(link);
+
+cleanup:
+ kprobe_multi_override__destroy(skel);
+}
+
void serial_test_kprobe_multi_bench_attach(void)
{
if (test__start_subtest("kernel"))
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
+ if (test__start_subtest("attach_override"))
+ test_attach_override();
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <bpf/btf.h>
+#include <test_progs.h>
+
+#include "test_bpf_ma.skel.h"
+
+void test_test_bpf_ma(void)
+{
+ struct test_bpf_ma *skel;
+ struct btf *btf;
+ int i, err;
+
+ skel = test_bpf_ma__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ btf = bpf_object__btf(skel->obj);
+ if (!ASSERT_OK_PTR(btf, "btf"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) {
+ char name[32];
+ int id;
+
+ snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]);
+ id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
+ if (!ASSERT_GT(id, 0, "bin_data"))
+ goto out;
+ skel->rodata->data_btf_ids[i] = id;
+ }
+
+ err = test_bpf_ma__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto out;
+
+ err = test_bpf_ma__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+ ASSERT_OK(skel->bss->err, "test error");
+out:
+ test_bpf_ma__destroy(skel);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <net/if.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#define LOCAL_NETNS "xdp_dev_bound_only_netns"
+
+static int load_dummy_prog(char *name, __u32 ifindex, __u32 flags)
+{
+ struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.prog_flags = flags;
+ opts.prog_ifindex = ifindex;
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, name, "GPL", insns, ARRAY_SIZE(insns), &opts);
+}
+
+/* A test case for a bpf_offload_netdev->offload handling bug:
+ * - create a veth device (does not support offload);
+ * - create a device bound XDP program with BPF_F_XDP_DEV_BOUND_ONLY flag
+ * (such programs are not offloaded);
+ * - create a device bound XDP program without flags (such programs are offloaded).
+ * This might lead to 'BUG: kernel NULL pointer dereference'.
+ */
+void test_xdp_dev_bound_only_offdev(void)
+{
+ struct nstoken *tok = NULL;
+ __u32 ifindex;
+ int fd1 = -1;
+ int fd2 = -1;
+
+ SYS(out, "ip netns add " LOCAL_NETNS);
+ tok = open_netns(LOCAL_NETNS);
+ if (!ASSERT_OK_PTR(tok, "open_netns"))
+ goto out;
+ SYS(out, "ip link add eth42 type veth");
+ ifindex = if_nametoindex("eth42");
+ if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) {
+ perror("if_nametoindex");
+ goto out;
+ }
+ fd1 = load_dummy_prog("dummy1", ifindex, BPF_F_XDP_DEV_BOUND_ONLY);
+ if (!ASSERT_GE(fd1, 0, "load_dummy_prog #1")) {
+ perror("load_dummy_prog #1");
+ goto out;
+ }
+ /* A program with an ifindex is considered offloaded; veth does
+ * not support offload, so an error should be reported.
+ */
+ fd2 = load_dummy_prog("dummy2", ifindex, 0);
+ ASSERT_EQ(fd2, -EINVAL, "load_dummy_prog #2 (offloaded)");
+
+out:
+ close(fd1);
+ close(fd2);
+ close_netns(tok);
+ /* eth42 was added inside the netns, so removing the netns will
+ * also remove the eth42 veth pair.
+ */
+ SYS_NOFAIL("ip netns del " LOCAL_NETNS);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi")
+int test_override(struct pt_regs *ctx)
+{
+ bpf_override_return(ctx, 123);
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+struct generic_map_value {
+ void *data;
+};
+
+char _license[] SEC("license") = "GPL";
+
+const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
+const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
+int err = 0;
+int pid = 0;
+
+#define DEFINE_ARRAY_WITH_KPTR(_size) \
+ struct bin_data_##_size { \
+ char data[_size - sizeof(void *)]; \
+ }; \
+ struct map_value_##_size { \
+ struct bin_data_##_size __kptr * data; \
+ /* To emit BTF info for bin_data_xx */ \
+ struct bin_data_##_size not_used; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_##_size); \
+ __uint(max_entries, 128); \
+ } array_##_size SEC(".maps");
+
+static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
+ if (!new) {
+ err = 2;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_obj_drop(old);
+ err = 3;
+ return;
+ }
+ }
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 4;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old) {
+ err = 5;
+ return;
+ }
+ bpf_obj_drop(old);
+ }
+}
+
+#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
+ batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)
+
+DEFINE_ARRAY_WITH_KPTR(8);
+DEFINE_ARRAY_WITH_KPTR(16);
+DEFINE_ARRAY_WITH_KPTR(32);
+DEFINE_ARRAY_WITH_KPTR(64);
+DEFINE_ARRAY_WITH_KPTR(96);
+DEFINE_ARRAY_WITH_KPTR(128);
+DEFINE_ARRAY_WITH_KPTR(192);
+DEFINE_ARRAY_WITH_KPTR(256);
+DEFINE_ARRAY_WITH_KPTR(512);
+DEFINE_ARRAY_WITH_KPTR(1024);
+DEFINE_ARRAY_WITH_KPTR(2048);
+DEFINE_ARRAY_WITH_KPTR(4096);
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int test_bpf_mem_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Allocate 128 8-byte objects in a batch to trigger refilling,
+ * then free 128 8-byte objects in a batch to trigger freeing.
+ */
+ CALL_BATCH_ALLOC_FREE(8, 128, 0);
+ CALL_BATCH_ALLOC_FREE(16, 128, 1);
+ CALL_BATCH_ALLOC_FREE(32, 128, 2);
+ CALL_BATCH_ALLOC_FREE(64, 128, 3);
+ CALL_BATCH_ALLOC_FREE(96, 128, 4);
+ CALL_BATCH_ALLOC_FREE(128, 128, 5);
+ CALL_BATCH_ALLOC_FREE(192, 128, 6);
+ CALL_BATCH_ALLOC_FREE(256, 128, 7);
+ CALL_BATCH_ALLOC_FREE(512, 64, 8);
+ CALL_BATCH_ALLOC_FREE(1024, 32, 9);
+ CALL_BATCH_ALLOC_FREE(2048, 16, 10);
+ CALL_BATCH_ALLOC_FREE(4096, 8, 11);
+
+ return 0;
+}
}
}
- get_unpriv_disabled();
+ unpriv_disabled = get_unpriv_disabled();
if (unpriv && unpriv_disabled) {
printf("Cannot run as unprivileged user with sysctl %s.\n",
UNPRIV_SYSCTL);
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES)
TEST_GEN_PROGS := fchmodat2_test
include ../lib.mk
# kselftest skip code is 4
err_skip=4
+# umount required
+UMOUNT_DIR=""
+
# cgroup RT scheduling prevents chrt commands from succeeding, which
# induces failures in test wakeup tests. Disable for the duration of
# the tests.
cleanup() {
echo $sched_rt_runtime_orig > $sched_rt_runtime
+ if [ -n "${UMOUNT_DIR}" ]; then
+ umount ${UMOUNT_DIR} ||:
+ fi
}
errexit() { # message
;;
--logdir|-l)
LOG_DIR=$2
+ LINK_PTR=
shift 2
;;
*.tc)
mount -t tracefs nodev /sys/kernel/tracing ||
errexit "Failed to mount /sys/kernel/tracing"
TRACING_DIR="/sys/kernel/tracing"
+ UMOUNT_DIR=${TRACING_DIR}
# If debugfs exists, then so does /sys/kernel/debug
elif [ -d "/sys/kernel/debug" ]; then
mount -t debugfs nodev /sys/kernel/debug ||
errexit "Failed to mount /sys/kernel/debug"
TRACING_DIR="/sys/kernel/debug/tracing"
+ UMOUNT_DIR=${TRACING_DIR}
else
err_ret=$err_skip
errexit "debugfs and tracefs are not configured in this kernel"
TOP_DIR=`absdir $0`
TEST_DIR=$TOP_DIR/test.d
TEST_CASES=`find_testcases $TEST_DIR`
-LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
+LOG_TOP_DIR=$TOP_DIR/logs
+LOG_DATE=`date +%Y%m%d-%H%M%S`
+LOG_DIR=$LOG_TOP_DIR/$LOG_DATE/
+LINK_PTR=$LOG_TOP_DIR/latest
KEEP_LOG=0
KTAP=0
DEBUG=0
LOG_FILE=$LOG_DIR/ftracetest.log
mkdir -p $LOG_DIR || errexit "Failed to make a log directory: $LOG_DIR"
date > $LOG_FILE
+ if [ "x-$LINK_PTR" != "x-" ]; then
+ unlink $LINK_PTR
+ ln -fs $LOG_DATE $LINK_PTR
+ fi
fi
# Define text colors
instance_set() {
while :; do
- echo 1 > foo/events/sched/sched_switch
+ echo 1 > foo/events/sched/sched_switch/enable
done 2> /dev/null
}
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param
-# requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program
+# requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program
fail() { #msg
echo $1
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser errors
-# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
+# requires: synthetic_events error_log "' >> synthetic_events":README
check_error() { # command-with-error-pos-by-^
ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
{
# Make sure tests will time out if utility is available.
if [ -x /usr/bin/timeout ] ; then
- /usr/bin/timeout --foreground "$kselftest_timeout" $1
+ /usr/bin/timeout --foreground "$kselftest_timeout" \
+ /usr/bin/timeout "$kselftest_timeout" $1
else
$1
fi
print_targets=0
while getopts "p" arg; do
- case $arg in
- p)
+ case $arg in
+ p)
print_targets=1
shift;;
- esac
+ esac
done
if [ $# -eq 0 ]
# Get all TARGETS from selftests Makefile
targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+# Initially, in LDLIBS related lines, the dep checker needs
+# to ignore lines containing the following strings:
+filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
+
# Single test case
if [ $# -eq 2 ]
then
l1_test $test
l2_test $test
l3_test $test
+ l4_test $test
+ l5_test $test
print_results $1 $2
exit $?
# Append space at the end of the list to append more tests.
l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 2: LDLIBS set dynamically.
#
# Append space at the end of the list to append more tests.
l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 3
# memfd and others use pkg-config to find mount and fuse libs
# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
- grep -v "pkg-config" | awk -F: '{print $1}')
+ grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
-#echo $l1_tests
-#echo $l2_1_tests
-#echo $l3_tests
+# Level 4
+# some tests may fall back to a default using `|| echo -l<libname>`
+# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
+# as per level 3 checks.
+# e.g:
+# netfilter/Makefile
+# LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+ grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
+
+# Level 5
+# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
+# which in turn may be defined in a sub-Makefile
+# e.g.:
+# mm/Makefile
+# $(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
+l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
+ awk -F: '{print $1}' | uniq)
+
+#echo l1_tests $l1_tests
+#echo l2_tests $l2_tests
+#echo l3_tests $l3_tests
+#echo l4_tests $l4_tests
+#echo l5_tests $l5_tests
all_tests
print_results $1 $2
for test in $l3_tests; do
l3_test $test
done
+
+ for test in $l4_tests; do
+ l4_test $test
+ done
+
+ for test in $l5_tests; do
+ l5_test $test
+ done
}
# Use same parsing used for l1_tests and pick libraries this time.
l1_test()
{
test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | \
sed -e 's/+/ /' | cut -d "=" -f 2)
check_libs $test $test_libs
}
-# Use same parsing used for l2__tests and pick libraries this time.
+# Use same parsing used for l2_tests and pick libraries this time.
l2_test()
{
test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | sed -e 's/+/ /' | \
cut -d "=" -f 2)
check_libs $test $test_libs
}
+l4_test()
+{
+ test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
+ grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
+ sed -e 's/.*|| echo //' | sed -e 's/)$//')
+
+ check_libs $test $test_libs
+}
+
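As a sanity check of the extraction done by l4_test above, feeding it the sample line quoted in the Level 4 comment (netfilter/Makefile) reduces to the bare library name; this uses an equivalent single sed invocation and is an illustration only, not part of the checker:

    echo 'LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)' | \
        sed -e 's/.*|| echo //' -e 's/)$//'
    # prints: -lmnl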
+l5_test()
+{
+ tests=$(find $(dirname "$test") -type f -name "*.mk")
+ test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
+ cut -d "=" -f 2)
+
+ check_libs $test $test_libs
+}
+
check_libs()
{
char *str;
va_start(ap, fmt);
- vasprintf(&str, fmt, ap);
+ TEST_ASSERT(vasprintf(&str, fmt, ap) >= 0, "vasprintf() failed");
va_end(ap);
return str;
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
+
bool filter_reg(__u64 reg)
{
+ switch (reg & ~REG_MASK) {
/*
- * Some ISA extensions are optional and not present on all host,
- * but they can't be disabled through ISA_EXT registers when present.
- * So, to make life easy, just filtering out these kind of registers.
+	 * The same set of ISA_EXT registers is not present on all hosts because
+	 * ISA_EXT registers are visible to the KVM user space based on the
+	 * ISA extensions available on the host. Also, disabling an ISA
+	 * extension using the corresponding ISA_EXT register does not affect
+	 * the visibility of the ISA_EXT register itself.
+	 *
+	 * Based on the above, we should filter out all ISA_EXT registers.
*/
- switch (reg & ~REG_MASK) {
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
return true;
+ /* AIA registers are always available when Ssaia can't be disabled */
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+ return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
default:
break;
}
unsigned long value;
ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
- if (ret) {
- printf("Failed to get ext %d", ext);
- return false;
- }
-
- return !!value;
+ return (ret) ? false : !!value;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
+ unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
struct vcpu_reg_sublist *s;
+ int rc;
+
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+ __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);
/*
* Disable all extensions which were enabled by default
* if they were available in the risc-v host.
*/
- for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
- __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+ if (rc && isa_ext_state[i])
+ isa_ext_cant_disable[i] = true;
+ }
for_each_sublist(c, s) {
if (!s->feature)
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
- $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
endef
define INSTALL_RULE
fi
if [[ $cgroup2 ]]; then
- cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
+ cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
cgroup_path=/dev/cgroup/memory
mount -t cgroup2 none $cgroup_path
fi
echo "+hugetlb" >$cgroup_path/cgroup.subtree_control
else
- cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
+ cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
cgroup_path=/dev/cgroup/memory
mount -t cgroup memory,hugetlb $cgroup_path
if [[ $cgroup2 ]]; then
- CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
+ CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$CGROUP_ROOT" ]]; then
CGROUP_ROOT=/dev/cgroup/memory
mount -t cgroup2 none $CGROUP_ROOT
fi
echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control
else
- CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
+ CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
if [[ -z "$CGROUP_ROOT" ]]; then
CGROUP_ROOT=/dev/cgroup/memory
mount -t cgroup memory,hugetlb $CGROUP_ROOT
# SPDX-License-Identifier: GPL-2.0
NR_FILES=32768
-SAVED_NR_FILES=$(ulimit -n)
+readonly NETNS="ns-$(mktemp -u XXXXXX)"
# default values
port=443
done
setup() {
+ ip netns add "${NETNS}"
+ ip -netns "${NETNS}" link add veth0 type veth peer name veth1
+ ip -netns "${NETNS}" link set lo up
+ ip -netns "${NETNS}" link set veth0 up
+ ip -netns "${NETNS}" link set veth1 up
+
if [[ "$use_v6" == true ]]; then
- ip addr add $addr_v6 nodad dev eth0
+ ip -netns "${NETNS}" addr add $addr_v6 nodad dev veth0
else
- ip addr add $addr_v4 dev lo
+ ip -netns "${NETNS}" addr add $addr_v4 dev lo
fi
- ulimit -n $NR_FILES
}
cleanup() {
- if [[ "$use_v6" == true ]]; then
- ip addr del $addr_v6 dev eth0
- else
- ip addr del $addr_v4/32 dev lo
- fi
- ulimit -n $SAVED_NR_FILES
+ ip netns del "${NETNS}"
}
if [[ "$addr" != "" ]]; then
fi
setup
if [[ "$use_v6" == true ]] ; then
- ./bind_bhash $port "ipv6" $addr_v6
+ ip netns exec "${NETNS}" sh -c \
+ "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv6 ${addr_v6}"
else
- ./bind_bhash $port "ipv4" $addr_v4
+ ip netns exec "${NETNS}" sh -c \
+ "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv4 ${addr_v4}"
fi
cleanup
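With the binder now launched through ip netns exec sh -c, the file-descriptor limit has to be raised inside that same shell: ulimit is per-process and only affects the shell that sets it plus its children. A tiny illustration (the "demo" namespace name and the 4096 value are arbitrary; root is required):

    ip netns add demo
    ip netns exec demo sh -c 'ulimit -n 4096; ulimit -n'   # prints 4096
    ulimit -n                                              # the parent shell keeps its old limit
    ip netns del demo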
#include "../kselftest_harness.h"
+struct in6_addr in6addr_v4mapped_any = {
+ .s6_addr = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 255, 255,
+ 0, 0, 0, 0
+ }
+};
+
+struct in6_addr in6addr_v4mapped_loopback = {
+ .s6_addr = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 255, 255,
+ 127, 0, 0, 1
+ }
+};
+
FIXTURE(bind_wildcard)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
- int expected_errno;
};
FIXTURE_VARIANT(bind_wildcard)
{
const __u32 addr4_const;
const struct in6_addr *addr6_const;
+ int expected_errno;
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_any,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_loopback,
+ .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_v4mapped_any,
+ .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_v4mapped_loopback,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_any,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_loopback,
+ .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_v4mapped_any,
+ .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_v4mapped_loopback,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_SETUP(bind_wildcard)
self->addr6.sin6_family = AF_INET6;
self->addr6.sin6_port = htons(0);
self->addr6.sin6_addr = *variant->addr6_const;
-
- if (variant->addr6_const == &in6addr_any)
- self->expected_errno = EADDRINUSE;
- else
- self->expected_errno = 0;
}
FIXTURE_TEARDOWN(bind_wildcard)
void bind_sockets(struct __test_metadata *_metadata,
FIXTURE_DATA(bind_wildcard) *self,
+ int expected_errno,
struct sockaddr *addr1, socklen_t addrlen1,
struct sockaddr *addr2, socklen_t addrlen2)
{
ASSERT_GT(fd[1], 0);
ret = bind(fd[1], addr2, addrlen2);
- if (self->expected_errno) {
+ if (expected_errno) {
ASSERT_EQ(ret, -1);
- ASSERT_EQ(errno, self->expected_errno);
+ ASSERT_EQ(errno, expected_errno);
} else {
ASSERT_EQ(ret, 0);
}
TEST_F(bind_wildcard, v4_v6)
{
- bind_sockets(_metadata, self,
- (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+ bind_sockets(_metadata, self, variant->expected_errno,
+ (struct sockaddr *)&self->addr4, sizeof(self->addr4),
(struct sockaddr *)&self->addr6, sizeof(self->addr6));
}
TEST_F(bind_wildcard, v6_v4)
{
- bind_sockets(_metadata, self,
+ bind_sockets(_metadata, self, variant->expected_errno,
(struct sockaddr *)&self->addr6, sizeof(self->addr6),
(struct sockaddr *)&self->addr4, sizeof(self->addr4));
}
done
}
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-
-trap cleanup EXIT
-
-for i in "$ns1" "$ns2" "$ns3" ;do
- ip netns add $i || exit $ksft_skip
- ip -net $i link set lo up
-done
-
-echo "INFO: preparing interfaces."
-# Three HSR nodes. Each node has one link to each of its neighbour, two links in total.
-#
-# ns1eth1 ----- ns2eth1
-# hsr1 hsr2
-# ns1eth2 ns2eth2
-# | |
-# ns3eth1 ns3eth2
-# \ /
-# hsr3
-#
-# Interfaces
-ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
-ip link add ns1eth2 netns "$ns1" type veth peer name ns3eth1 netns "$ns3"
-ip link add ns3eth2 netns "$ns3" type veth peer name ns2eth2 netns "$ns2"
-
-# HSRv0.
-ip -net "$ns1" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version 0 proto 0
-ip -net "$ns2" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 supervision 45 version 0 proto 0
-ip -net "$ns3" link add name hsr3 type hsr slave1 ns3eth1 slave2 ns3eth2 supervision 45 version 0 proto 0
-
-# IP for HSR
-ip -net "$ns1" addr add 100.64.0.1/24 dev hsr1
-ip -net "$ns1" addr add dead:beef:1::1/64 dev hsr1 nodad
-ip -net "$ns2" addr add 100.64.0.2/24 dev hsr2
-ip -net "$ns2" addr add dead:beef:1::2/64 dev hsr2 nodad
-ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3
-ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad
-
-# All Links up
-ip -net "$ns1" link set ns1eth1 up
-ip -net "$ns1" link set ns1eth2 up
-ip -net "$ns1" link set hsr1 up
-
-ip -net "$ns2" link set ns2eth1 up
-ip -net "$ns2" link set ns2eth2 up
-ip -net "$ns2" link set hsr2 up
-
-ip -net "$ns3" link set ns3eth1 up
-ip -net "$ns3" link set ns3eth2 up
-ip -net "$ns3" link set hsr3 up
-
# $1: IP address
is_v6()
{
fi
}
-
-echo "INFO: Initial validation ping."
-# Each node has to be able each one.
-do_ping "$ns1" 100.64.0.2
-do_ping "$ns2" 100.64.0.1
-do_ping "$ns3" 100.64.0.1
-stop_if_error "Initial validation failed."
-
-do_ping "$ns1" 100.64.0.3
-do_ping "$ns2" 100.64.0.3
-do_ping "$ns3" 100.64.0.2
-
-do_ping "$ns1" dead:beef:1::2
-do_ping "$ns1" dead:beef:1::3
-do_ping "$ns2" dead:beef:1::1
-do_ping "$ns2" dead:beef:1::2
-do_ping "$ns3" dead:beef:1::1
-do_ping "$ns3" dead:beef:1::2
-
-stop_if_error "Initial validation failed."
+do_complete_ping_test()
+{
+ echo "INFO: Initial validation ping."
+	# Each node has to be able to reach every other node.
+ do_ping "$ns1" 100.64.0.2
+ do_ping "$ns2" 100.64.0.1
+ do_ping "$ns3" 100.64.0.1
+ stop_if_error "Initial validation failed."
+
+ do_ping "$ns1" 100.64.0.3
+ do_ping "$ns2" 100.64.0.3
+ do_ping "$ns3" 100.64.0.2
+
+ do_ping "$ns1" dead:beef:1::2
+ do_ping "$ns1" dead:beef:1::3
+ do_ping "$ns2" dead:beef:1::1
+ do_ping "$ns2" dead:beef:1::2
+ do_ping "$ns3" dead:beef:1::1
+ do_ping "$ns3" dead:beef:1::2
+
+ stop_if_error "Initial validation failed."
# Wait until all supervision frames have been processed and the node
# entries have been merged. Otherwise duplicate frames will be observed, which
# is valid at this stage.
-WAIT=5
-while [ ${WAIT} -gt 0 ]
-do
- grep 00:00:00:00:00:00 /sys/kernel/debug/hsr/hsr*/node_table
- if [ $? -ne 0 ]
- then
- break
- fi
- sleep 1
- let WAIT = WAIT - 1
-done
+ WAIT=5
+ while [ ${WAIT} -gt 0 ]
+ do
+ grep 00:00:00:00:00:00 /sys/kernel/debug/hsr/hsr*/node_table
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ sleep 1
+ let "WAIT = WAIT - 1"
+ done
# Just a safety delay in case the above check didn't handle it.
-sleep 1
+ sleep 1
+
+ echo "INFO: Longer ping test."
+ do_ping_long "$ns1" 100.64.0.2
+ do_ping_long "$ns1" dead:beef:1::2
+ do_ping_long "$ns1" 100.64.0.3
+ do_ping_long "$ns1" dead:beef:1::3
-echo "INFO: Longer ping test."
-do_ping_long "$ns1" 100.64.0.2
-do_ping_long "$ns1" dead:beef:1::2
-do_ping_long "$ns1" 100.64.0.3
-do_ping_long "$ns1" dead:beef:1::3
+ stop_if_error "Longer ping test failed."
-stop_if_error "Longer ping test failed."
+ do_ping_long "$ns2" 100.64.0.1
+ do_ping_long "$ns2" dead:beef:1::1
+ do_ping_long "$ns2" 100.64.0.3
+ do_ping_long "$ns2" dead:beef:1::2
+ stop_if_error "Longer ping test failed."
-do_ping_long "$ns2" 100.64.0.1
-do_ping_long "$ns2" dead:beef:1::1
-do_ping_long "$ns2" 100.64.0.3
-do_ping_long "$ns2" dead:beef:1::2
-stop_if_error "Longer ping test failed."
+ do_ping_long "$ns3" 100.64.0.1
+ do_ping_long "$ns3" dead:beef:1::1
+ do_ping_long "$ns3" 100.64.0.2
+ do_ping_long "$ns3" dead:beef:1::2
+ stop_if_error "Longer ping test failed."
-do_ping_long "$ns3" 100.64.0.1
-do_ping_long "$ns3" dead:beef:1::1
-do_ping_long "$ns3" 100.64.0.2
-do_ping_long "$ns3" dead:beef:1::2
-stop_if_error "Longer ping test failed."
+ echo "INFO: Cutting one link."
+ do_ping_long "$ns1" 100.64.0.3 &
-echo "INFO: Cutting one link."
-do_ping_long "$ns1" 100.64.0.3 &
+ sleep 3
+ ip -net "$ns3" link set ns3eth1 down
+ wait
-sleep 3
-ip -net "$ns3" link set ns3eth1 down
-wait
+ ip -net "$ns3" link set ns3eth1 up
-ip -net "$ns3" link set ns3eth1 up
+ stop_if_error "Failed with one link down."
-stop_if_error "Failed with one link down."
+ echo "INFO: Delay the link and drop a few packages."
+ tc -net "$ns3" qdisc add dev ns3eth1 root netem delay 50ms
+ tc -net "$ns2" qdisc add dev ns2eth1 root netem delay 5ms loss 25%
-echo "INFO: Delay the link and drop a few packages."
-tc -net "$ns3" qdisc add dev ns3eth1 root netem delay 50ms
-tc -net "$ns2" qdisc add dev ns2eth1 root netem delay 5ms loss 25%
+ do_ping_long "$ns1" 100.64.0.2
+ do_ping_long "$ns1" 100.64.0.3
-do_ping_long "$ns1" 100.64.0.2
-do_ping_long "$ns1" 100.64.0.3
+ stop_if_error "Failed with delay and packetloss."
-stop_if_error "Failed with delay and packetloss."
+ do_ping_long "$ns2" 100.64.0.1
+ do_ping_long "$ns2" 100.64.0.3
-do_ping_long "$ns2" 100.64.0.1
-do_ping_long "$ns2" 100.64.0.3
+ stop_if_error "Failed with delay and packetloss."
-stop_if_error "Failed with delay and packetloss."
+ do_ping_long "$ns3" 100.64.0.1
+ do_ping_long "$ns3" 100.64.0.2
+ stop_if_error "Failed with delay and packetloss."
+
+ echo "INFO: All good."
+}
+
+setup_hsr_interfaces()
+{
+ local HSRv="$1"
+
+ echo "INFO: preparing interfaces for HSRv${HSRv}."
+# Three HSR nodes. Each node has one link to each of its neighbours, two links in total.
+#
+# ns1eth1 ----- ns2eth1
+# hsr1 hsr2
+# ns1eth2 ns2eth2
+# | |
+# ns3eth1 ns3eth2
+# \ /
+# hsr3
+#
+ # Interfaces
+ ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+ ip link add ns1eth2 netns "$ns1" type veth peer name ns3eth1 netns "$ns3"
+ ip link add ns3eth2 netns "$ns3" type veth peer name ns2eth2 netns "$ns2"
+
+ # HSRv0/1
+ ip -net "$ns1" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version $HSRv proto 0
+ ip -net "$ns2" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 supervision 45 version $HSRv proto 0
+ ip -net "$ns3" link add name hsr3 type hsr slave1 ns3eth1 slave2 ns3eth2 supervision 45 version $HSRv proto 0
+
+ # IP for HSR
+ ip -net "$ns1" addr add 100.64.0.1/24 dev hsr1
+ ip -net "$ns1" addr add dead:beef:1::1/64 dev hsr1 nodad
+ ip -net "$ns2" addr add 100.64.0.2/24 dev hsr2
+ ip -net "$ns2" addr add dead:beef:1::2/64 dev hsr2 nodad
+ ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3
+ ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad
+
+ # All Links up
+ ip -net "$ns1" link set ns1eth1 up
+ ip -net "$ns1" link set ns1eth2 up
+ ip -net "$ns1" link set hsr1 up
+
+ ip -net "$ns2" link set ns2eth1 up
+ ip -net "$ns2" link set ns2eth2 up
+ ip -net "$ns2" link set hsr2 up
+
+ ip -net "$ns3" link set ns3eth1 up
+ ip -net "$ns3" link set ns3eth2 up
+ ip -net "$ns3" link set hsr3 up
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+for i in "$ns1" "$ns2" "$ns3" ;do
+ ip netns add $i || exit $ksft_skip
+ ip -net $i link set lo up
+done
+
+setup_hsr_interfaces 0
+do_complete_ping_test
+cleanup
+
+for i in "$ns1" "$ns2" "$ns3" ;do
+ ip netns add $i || exit $ksft_skip
+ ip -net $i link set lo up
+done
-do_ping_long "$ns3" 100.64.0.1
-do_ping_long "$ns3" 100.64.0.2
-stop_if_error "Failed with delay and packetloss."
+setup_hsr_interfaces 1
+do_complete_ping_test
-echo "INFO: All good."
exit $ret
msg.msg_iov = &vec;
msg.msg_iovlen = 1;
- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
}
while (recvs++ < sends) {
- EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+ EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
}
free(mem);
msg.msg_iov = vec;
msg.msg_iovlen = iov_len;
- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
buf = malloc(total_len);
- EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+ EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
for (i = 0; i < iov_len; i++) {
EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
strlen(test_strs[i])),
# SPDX-License-Identifier: GPL-2.0-only
nf-queue
connect_close
+audit_logread
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh nft_synproxy.sh rpath.sh
+ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
HOSTPKG_CONFIG := pkg-config
CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
-TEST_GEN_FILES = nf-queue connect_close
+TEST_GEN_FILES = nf-queue connect_close audit_logread
include ../lib.mk
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <linux/audit.h>
+#include <linux/netlink.h>
+
+static int fd;
+
+#define MAX_AUDIT_MESSAGE_LENGTH 8970
+struct audit_message {
+ struct nlmsghdr nlh;
+ union {
+ struct audit_status s;
+ char data[MAX_AUDIT_MESSAGE_LENGTH];
+ } u;
+};
+
+int audit_recv(int fd, struct audit_message *rep)
+{
+ struct sockaddr_nl addr;
+ socklen_t addrlen = sizeof(addr);
+ int ret;
+
+ do {
+ ret = recvfrom(fd, rep, sizeof(*rep), 0,
+ (struct sockaddr *)&addr, &addrlen);
+ } while (ret < 0 && errno == EINTR);
+
+ if (ret < 0 ||
+ addrlen != sizeof(addr) ||
+ addr.nl_pid != 0 ||
+ rep->nlh.nlmsg_type == NLMSG_ERROR) /* short-cut for now */
+ return -1;
+
+ return ret;
+}
+
+int audit_send(int fd, uint16_t type, uint32_t key, uint32_t val)
+{
+ static int seq = 0;
+ struct audit_message msg = {
+ .nlh = {
+ .nlmsg_len = NLMSG_SPACE(sizeof(msg.u.s)),
+ .nlmsg_type = type,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ .nlmsg_seq = ++seq,
+ },
+ .u.s = {
+ .mask = key,
+ .enabled = key == AUDIT_STATUS_ENABLED ? val : 0,
+ .pid = key == AUDIT_STATUS_PID ? val : 0,
+ }
+ };
+ struct sockaddr_nl addr = {
+ .nl_family = AF_NETLINK,
+ };
+ int ret;
+
+ do {
+ ret = sendto(fd, &msg, msg.nlh.nlmsg_len, 0,
+ (struct sockaddr *)&addr, sizeof(addr));
+ } while (ret < 0 && errno == EINTR);
+
+ if (ret != (int)msg.nlh.nlmsg_len)
+ return -1;
+ return 0;
+}
+
+int audit_set(int fd, uint32_t key, uint32_t val)
+{
+ struct audit_message rep = { 0 };
+ int ret;
+
+ ret = audit_send(fd, AUDIT_SET, key, val);
+ if (ret)
+ return ret;
+
+ ret = audit_recv(fd, &rep);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int readlog(int fd)
+{
+ struct audit_message rep = { 0 };
+ int ret = audit_recv(fd, &rep);
+ const char *sep = "";
+ char *k, *v;
+
+ if (ret < 0)
+ return ret;
+
+ if (rep.nlh.nlmsg_type != AUDIT_NETFILTER_CFG)
+ return 0;
+
+ /* skip the initial "audit(...): " part */
+ strtok(rep.u.data, " ");
+
+ while ((k = strtok(NULL, "="))) {
+ v = strtok(NULL, " ");
+
+ /* these vary and/or are uninteresting, ignore */
+ if (!strcmp(k, "pid") ||
+ !strcmp(k, "comm") ||
+ !strcmp(k, "subj"))
+ continue;
+
+ /* strip the varying sequence number */
+ if (!strcmp(k, "table"))
+ *strchrnul(v, ':') = '\0';
+
+ printf("%s%s=%s", sep, k, v);
+ sep = " ";
+ }
+ if (*sep) {
+ printf("\n");
+ fflush(stdout);
+ }
+ return 0;
+}
+
+void cleanup(int sig)
+{
+ audit_set(fd, AUDIT_STATUS_ENABLED, 0);
+ close(fd);
+ if (sig)
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ struct sigaction act = {
+ .sa_handler = cleanup,
+ };
+
+ fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
+ if (fd < 0) {
+ perror("Can't open netlink socket");
+ return -1;
+ }
+
+ if (sigaction(SIGTERM, &act, NULL) < 0 ||
+ sigaction(SIGINT, &act, NULL) < 0) {
+ perror("Can't set signal handler");
+ close(fd);
+ return -1;
+ }
+
+ audit_set(fd, AUDIT_STATUS_ENABLED, 1);
+ audit_set(fd, AUDIT_STATUS_PID, getpid());
+
+ while (1)
+ readlog(fd);
+}
CONFIG_NFT_MASQ=m
CONFIG_NFT_FLOW_OFFLOAD=m
CONFIG_NF_CT_NETLINK=m
+CONFIG_AUDIT=y
--- /dev/null
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check that audit logs generated for nft commands are as expected.
+
+SKIP_RC=4
+RC=0
+
+nft --version >/dev/null 2>&1 || {
+ echo "SKIP: missing nft tool"
+ exit $SKIP_RC
+}
+
+logfile=$(mktemp)
+echo "logging into $logfile"
+./audit_logread >"$logfile" &
+logread_pid=$!
+trap 'kill $logread_pid; rm -f $logfile' EXIT
+exec 3<"$logfile"
+
+do_test() { # (cmd, log)
+ echo -n "testing for cmd: $1 ... "
+ cat <&3 >/dev/null
+ $1 >/dev/null || exit 1
+ sleep 0.1
+ res=$(diff -a -u <(echo "$2") - <&3)
+ [ $? -eq 0 ] && { echo "OK"; return; }
+ echo "FAIL"
+ echo "$res"
+ ((RC++))
+}
+
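do_test() leans on the fd 3 opened on $logfile above: draining it before running the command means the later diff only sees audit lines produced by that command. A stripped-down sketch of the same read-offset trick, with placeholder file and strings:

    log=$(mktemp)
    exec 3<"$log"          # one persistent read offset across snapshots
    echo old >>"$log"
    cat <&3 >/dev/null     # drain everything written so far
    echo new >>"$log"
    cat <&3                # prints only "new"
    exec 3<&-
    rm -f "$log"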
+nft flush ruleset
+
+for table in t1 t2; do
+ do_test "nft add table $table" \
+ "table=$table family=2 entries=1 op=nft_register_table"
+
+ do_test "nft add chain $table c1" \
+ "table=$table family=2 entries=1 op=nft_register_chain"
+
+ do_test "nft add chain $table c2; add chain $table c3" \
+ "table=$table family=2 entries=2 op=nft_register_chain"
+
+ cmd="add rule $table c1 counter"
+
+ do_test "nft $cmd" \
+ "table=$table family=2 entries=1 op=nft_register_rule"
+
+ do_test "nft $cmd; $cmd" \
+ "table=$table family=2 entries=2 op=nft_register_rule"
+
+ cmd=""
+ sep=""
+ for chain in c2 c3; do
+ for i in {1..3}; do
+ cmd+="$sep add rule $table $chain counter"
+ sep=";"
+ done
+ done
+ do_test "nft $cmd" \
+ "table=$table family=2 entries=6 op=nft_register_rule"
+done
+
+do_test 'nft reset rules t1 c2' \
+'table=t1 family=2 entries=3 op=nft_reset_rule'
+
+do_test 'nft reset rules table t1' \
+'table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule'
+
+do_test 'nft reset rules' \
+'table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule'
+
+for ((i = 0; i < 500; i++)); do
+ echo "add rule t2 c3 counter accept comment \"rule $i\""
+done | do_test 'nft -f -' \
+'table=t2 family=2 entries=500 op=nft_register_rule'
+
+do_test 'nft reset rules t2 c3' \
+'table=t2 family=2 entries=189 op=nft_reset_rule
+table=t2 family=2 entries=188 op=nft_reset_rule
+table=t2 family=2 entries=126 op=nft_reset_rule'
+
+do_test 'nft reset rules t2' \
+'table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=186 op=nft_reset_rule
+table=t2 family=2 entries=188 op=nft_reset_rule
+table=t2 family=2 entries=129 op=nft_reset_rule'
+
+do_test 'nft reset rules' \
+'table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule
+table=t1 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=3 op=nft_reset_rule
+table=t2 family=2 entries=180 op=nft_reset_rule
+table=t2 family=2 entries=188 op=nft_reset_rule
+table=t2 family=2 entries=135 op=nft_reset_rule'
+
+exit $RC
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan
TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
include ../lib.mk
done;
endef
-override define EMIT_TESTS
+emit_tests:
+@for TARGET in $(SUB_DIRS); do \
BUILD_TARGET=$(OUTPUT)/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\
done;
-endef
override define CLEAN
+@for TARGET in $(SUB_DIRS); do \
tags:
find . -name '*.c' -o -name '*.h' | xargs ctags
-.PHONY: tags $(SUB_DIRS)
+.PHONY: tags $(SUB_DIRS) emit_tests
+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
endef
-DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
-override define EMIT_TESTS
- $(DEFAULT_EMIT_TESTS)
+emit_tests:
+ for TEST in $(TEST_GEN_PROGS); do \
+ BASENAME_TEST=`basename $$TEST`; \
+ echo "$(COLLECTION):$$BASENAME_TEST"; \
+ done
+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
-endef
DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
override define INSTALL_RULE
event_code_tests:
TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
-.PHONY: all run_tests ebb sampling_tests event_code_tests
+.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests
"Private_Dirty: 0 kB\n"
"Referenced: 0 kB\n"
"Anonymous: 0 kB\n"
+"KSM: 0 kB\n"
"LazyFree: 0 kB\n"
"AnonHugePages: 0 kB\n"
"ShmemPmdMapped: 0 kB\n"
#include <asm/unistd.h>
#include "../kselftest_harness.h"
+#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__abi_event/enable";
FIXTURE(user) {
long check;
+ bool umount;
};
FIXTURE_SETUP(user) {
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
+
change_event(false);
self->check = 0;
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
}
TEST_F(user, enablement) {
--- /dev/null
+CONFIG_USER_EVENTS=y
#include <unistd.h>
#include "../kselftest_harness.h"
+#include "user_events_selftests.h"
const char *abi_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable";
FIXTURE(user) {
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
wait_for_delete();
}
#include <unistd.h>
#include "../kselftest_harness.h"
+#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *status_file = "/sys/kernel/tracing/user_events_status";
int data_fd;
int enable_fd;
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
+
self->status_fd = open(status_file, O_RDONLY);
ASSERT_NE(-1, self->status_fd);
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
close(self->status_fd);
close(self->data_fd);
#include <asm/unistd.h>
#include "../kselftest_harness.h"
+#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *id_file = "/sys/kernel/tracing/events/user_events/__test_event/id";
FIXTURE(user) {
int data_fd;
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
+
self->data_fd = open(data_file, O_RDWR);
ASSERT_NE(-1, self->data_fd);
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
close(self->data_fd);
if (clear(&self->check) != 0)
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _USER_EVENTS_SELFTESTS_H
+#define _USER_EVENTS_SELFTESTS_H
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "../kselftest.h"
+
+static inline void tracefs_unmount(void)
+{
+ umount("/sys/kernel/tracing");
+}
+
+static inline bool tracefs_enabled(char **message, bool *fail, bool *umount)
+{
+ struct stat buf;
+ int ret;
+
+ *message = "";
+ *fail = false;
+ *umount = false;
+
+ /* Ensure tracefs is installed */
+ ret = stat("/sys/kernel/tracing", &buf);
+
+ if (ret == -1) {
+ *message = "Tracefs is not installed";
+ return false;
+ }
+
+ /* Ensure mounted tracefs */
+ ret = stat("/sys/kernel/tracing/README", &buf);
+
+ if (ret == -1 && errno == ENOENT) {
+ if (mount(NULL, "/sys/kernel/tracing", "tracefs", 0, NULL) != 0) {
+ *message = "Cannot mount tracefs";
+ *fail = true;
+ return false;
+ }
+
+ *umount = true;
+
+ ret = stat("/sys/kernel/tracing/README", &buf);
+ }
+
+ if (ret == -1) {
+ *message = "Cannot access tracefs";
+ *fail = true;
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool user_events_enabled(char **message, bool *fail, bool *umount)
+{
+ struct stat buf;
+ int ret;
+
+ *message = "";
+ *fail = false;
+ *umount = false;
+
+ if (getuid() != 0) {
+ *message = "Must be run as root";
+ *fail = true;
+ return false;
+ }
+
+ if (!tracefs_enabled(message, fail, umount))
+ return false;
+
+ /* Ensure user_events is installed */
+ ret = stat("/sys/kernel/tracing/user_events_data", &buf);
+
+ if (ret == -1) {
+ switch (errno) {
+ case ENOENT:
+ *message = "user_events is not installed";
+ return false;
+
+ default:
+ *message = "Cannot access user_events_data";
+ *fail = true;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#define USER_EVENT_FIXTURE_SETUP(statement, umount) do { \
+ char *message; \
+ bool fail; \
+ if (!user_events_enabled(&message, &fail, &(umount))) { \
+ if (fail) { \
+ TH_LOG("Setup failed due to: %s", message); \
+ ASSERT_FALSE(fail); \
+ } \
+ SKIP(statement, "Skipping due to: %s", message); \
+ } \
+} while (0)
+
+#define USER_EVENT_FIXTURE_TEARDOWN(umount) do { \
+ if ((umount)) \
+ tracefs_unmount(); \
+} while (0)
+
+#endif /* _USER_EVENTS_SELFTESTS_H */