D: XF86_8514
D: cfdisk (curses based disk partitioning program)
+N: Mat Martineau
+E: mat@martineau.name
+D: MPTCP subsystem co-maintainer 2020-2023
+D: Keyctl restricted keyring and Diffie-Hellman UAPI
+D: Bluetooth L2CAP ERTM mode and AMP
+S: USA
+
N: John S. Marvin
E: jsm@fc.hp.com
D: PA-RISC port
- qcom,msm8939-pcnoc
- qcom,msm8939-snoc
- qcom,msm8996-a1noc
- - qcom,msm8996-a2noc
- qcom,msm8996-bimc
- qcom,msm8996-cnoc
- qcom,msm8996-pnoc
compatible:
contains:
enum:
+ - qcom,msm8996-a2noc
+
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+ - const: aggre2_ufs_axi
+ - const: ufs_axi
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+ - description: Aggregate2 NoC UFS AXI Clock
+ - description: UFS AXI Clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sdm660-a2noc
then:
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#"
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB2 PHY
properties:
compatible:
enum:
- - amlogic,meson-g12a-usb2-phy
- - amlogic,meson-a1-usb2-phy
+ - amlogic,g12a-usb2-phy
+ - amlogic,a1-usb2-phy
reg:
maxItems: 1
examples:
- |
phy@36000 {
- compatible = "amlogic,meson-g12a-usb2-phy";
+ compatible = "amlogic,g12a-usb2-phy";
reg = <0x36000 0x2000>;
clocks = <&xtal>;
clock-names = "xtal";
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB3 + PCIE Combo PHY
properties:
compatible:
enum:
- - amlogic,meson-g12a-usb3-pcie-phy
+ - amlogic,g12a-usb3-pcie-phy
reg:
maxItems: 1
examples:
- |
phy@46000 {
- compatible = "amlogic,meson-g12a-usb3-pcie-phy";
+ compatible = "amlogic,g12a-usb3-pcie-phy";
reg = <0x46000 0x2000>;
clocks = <&ref_clk>;
clock-names = "ref_clk";
compatible:
enum:
- qcom,usb-hs-28nm-femtophy
- - qcom,usb-hs-28nm-mdm9607
reg:
maxItems: 1
qcom,protection-domain:
$ref: /schemas/types.yaml#/definitions/string-array
description: |
- Protection domain service name and path for APR service
- possible values are::
+ Protection domain service name and path for APR service (if supported).
+ Possible values are::
"avs/audio", "msm/adsp/audio_pd".
"kernel/elf_loader", "msm/modem/wlan_pd".
"tms/servreg", "msm/adsp/audio_pd".
required:
- reg
- - qcom,protection-domain
additionalProperties: true
When executing "make clean", the file "crc32table.h" will be deleted.
Kbuild will assume files to be in the same relative directory as the
-Makefile, except if prefixed with $(objtree).
+Makefile.
To exclude certain files or directories from make clean, use the
$(no-clean-files) variable.
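For example (hypothetical file name), listing "no-clean-files := keepme.h" in a
Makefile prevents "make clean" from deleting keepme.h even though it looks like
a generated file.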
userspace tools.
Documentation for Linux bridging is on:
- http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+ https://wiki.linuxfoundation.org/networking/bridge
The bridge-utilities are maintained at:
git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git
default 3
nf_conntrack_sctp_timeout_established - INTEGER (seconds)
- default 432000 (5 days)
+ default 210
+
+ Default is set to (hb_interval * path_max_retrans + rto_max)
nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
default 0.3
This timeout is used to setup conntrack entry on secondary paths.
Default is set to hb_interval.
-nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
- default 210
-
- This timeout is used to setup conntrack entry on secondary paths.
- Default is set to (hb_interval * path_max_retrans + rto_max)
-
nf_conntrack_udp_timeout - INTEGER (seconds)
default 30
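For reference, the 210-second defaults above follow from the stated formula
with RFC 4960's standard values: hb_interval = 30 s, path_max_retrans = 5 and
rto_max = 60 s give 30 * 5 + 60 = 210 s.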
M: Robert Moore <robert.moore@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
-L: devel@acpica.org
+L: acpica-devel@lists.linuxfoundation.org
S: Supported
W: https://acpica.org/
W: https://github.com/acpica/acpica/
F: drivers/firmware/efi/test/
EFI VARIABLE FILESYSTEM
-M: Matthew Garrett <matthew.garrett@nebula.com>
M: Jeremy Kerr <jk@ozlabs.org>
M: Ard Biesheuvel <ardb@kernel.org>
L: linux-efi@vger.kernel.org
F: include/linux/fscache*.h
FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
+M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
-M: Eric Biggers <ebiggers@kernel.org>
L: linux-fscrypt@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
-T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
+T: git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git
F: Documentation/filesystems/fscrypt.rst
F: fs/crypto/
-F: include/linux/fscrypt*.h
+F: include/linux/fscrypt.h
F: include/uapi/linux/fscrypt.h
FSI SUBSYSTEM
FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
-L: linux-fscrypt@vger.kernel.org
+L: fsverity@lists.linux.dev
S: Supported
-Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
-T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
+Q: https://patchwork.kernel.org/project/fsverity/list/
+T: git https://git.kernel.org/pub/scm/fs/fsverity/linux.git
F: Documentation/filesystems/fsverity.rst
F: fs/verity/
F: include/linux/fsverity.h
HISILICON DMA DRIVER
M: Zhou Wang <wangzhou1@hisilicon.com>
-M: Jie Hai <haijie1@hisilicon.com>
+M: Jie Hai <haijie1@huawei.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/hisi_dma.c
F: net/netlabel/
NETWORKING [MPTCP]
-M: Mat Martineau <mathew.j.martineau@linux.intel.com>
M: Matthieu Baerts <matthieu.baerts@tessares.net>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
CFLAGS_KERNEL =
RUSTFLAGS_KERNEL =
AFLAGS_KERNEL =
-export LDFLAGS_vmlinux =
+LDFLAGS_vmlinux =
# Use USERINCLUDE when you must reference the UAPI directories only.
USERINCLUDE := \
@:
PHONY += vmlinux
+# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
+# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
+# unrelated; the decompressors just happen to have the same base name,
+# arch/*/boot/compressed/vmlinux.
+# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
+#
+# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
+# https://savannah.gnu.org/bugs/?61463
+# For Make > 4.4, the following simple code will work:
+# vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
vmlinux: vmlinux.o $(KBUILD_LDS) modpost
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
# *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
# is an exception.
ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+KBUILD_BUILTIN := 1
modules: vmlinux
endif
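A note on the hunk above: module BTF is emitted as a split of the base BTF
carried in vmlinux, so with CONFIG_DEBUG_INFO_BTF_MODULES the modules target
must build vmlinux too; KBUILD_BUILTIN appears to be forced on for the same
reason, so the built-in objects vmlinux needs are generated even for a
modules-only build.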
};
gpio0: gpio@18100 {
- compatible = "marvell,armadaxp-gpio",
+ compatible = "marvell,armada-370-gpio",
"marvell,orion-gpio";
reg = <0x18100 0x40>, <0x181c0 0x08>;
reg-names = "gpio", "pwm";
};
gpio1: gpio@18140 {
- compatible = "marvell,armadaxp-gpio",
+ compatible = "marvell,armada-370-gpio",
"marvell,orion-gpio";
reg = <0x18140 0x40>, <0x181c8 0x08>;
reg-names = "gpio", "pwm";
};
gpio0: gpio@18100 {
- compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+ compatible = "marvell,orion-gpio";
reg = <0x18100 0x40>;
ngpios = <32>;
gpio-controller;
};
gpio1: gpio@18140 {
- compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+ compatible = "marvell,orion-gpio";
reg = <0x18140 0x40>;
ngpios = <28>;
gpio-controller;
scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
status = "okay";
- i2c-switch@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- uart-has-rtscts;
rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
};
&i2c1 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
};
&i2c4 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
};
&i2c1 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
serial@f995e000 {
status = "okay";
};
+ };
+};
- sdhci@f9824900 {
- bus-width = <8>;
- non-removable;
- status = "okay";
- };
+&sdhc_1 {
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
- sdhci@f98a4900 {
- cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
- bus-width = <4>;
- };
- };
+&sdhc_2 {
+ cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
};
status = "disabled";
};
- mmc@f9824900 {
+ sdhc_1: mmc@f9824900 {
compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
reg-names = "hc", "core";
status = "disabled";
};
- mmc@f98a4900 {
+ sdhc_2: mmc@f98a4900 {
compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
reg-names = "hc", "core";
mpddrc: mpddrc@ffffe800 {
compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
reg = <0xffffe800 0x200>;
- clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
clock-names = "ddrck", "mpddr";
};
&qspi {
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
#address-cells = <1>;
#size-cells = <0>;
&qspi {
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
#address-cells = <1>;
#size-cells = <0>;
&qspi {
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
#address-cells = <1>;
#size-cells = <0>;
&qspi {
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
#address-cells = <1>;
#size-cells = <0>;
};
&i2c2 {
- tca9548@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9548";
pinctrl-0 = <&pinctrl_i2c_mux_reset>;
pinctrl-names = "default";
};
&i2c2 {
- tca9548@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9548";
pinctrl-0 = <&pinctrl_i2c_mux_reset>;
pinctrl-names = "default";
#include <linux/init.h>
#include <linux/mc146818rtc.h>
-#include <linux/bcd.h>
#include <linux/io.h>
#include "common.h"
np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
iim_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!iim_base);
rev = readl(iim_base + MXC_IIMSREV);
iounmap(iim_base);
np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
ccm_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!ccm_base);
/*
* now we have access to the IO registers. As we need
np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
iim_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!iim_base);
/* read SREV register from IIM module */
np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
iim_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!iim_base);
rev = imx_readl(iim_base + MXC_IIMSREV);
np = of_find_compatible_node(NULL, NULL, compat);
iim_base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!iim_base);
srev = readl(iim_base + IIM_SREV) & 0xff;
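Each hunk above adds the same missing step: of_find_compatible_node() returns
its result with an elevated reference count, and once of_iomap() has created
the mapping, the node reference can be dropped. A minimal sketch of the
corrected pattern, with a hypothetical helper name:

#include <linux/of.h>
#include <linux/of_address.h>

/* Illustrative helper: look up a node by compatible and map its registers. */
static void __iomem *map_regs_by_compatible(const char *compat)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	base = of_iomap(np, 0);
	of_node_put(np);	/* drop the lookup's reference; mapping stays valid */

	return base;		/* caller is responsible for iounmap() */
}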
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
depends on CPU_LITTLE_ENDIAN
depends on ATAGS
+ select ARCH_OMAP
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
select CLKSRC_MMIO
select CPU_ARM926T
select OMAP_DM_TIMER
-config ARCH_OMAP1_ANY
- select ARCH_OMAP
- def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX
-
config ARCH_OMAP
bool
# Makefile for the linux kernel.
#
-ifdef CONFIG_ARCH_OMAP1_ANY
-
# Common support
obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
serial.o devices.o dma.o omap-dma.o fb.o
obj-$(CONFIG_ARCH_OMAP850) += gpio7xx.o
obj-$(CONFIG_ARCH_OMAP15XX) += gpio15xx.o
obj-$(CONFIG_ARCH_OMAP16XX) += gpio16xx.o
-
-endif
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/soc/ti/omap1-soc.h>
+#include <asm/irq.h>
#include "irqs.h"
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
*/
-static struct map_desc omap_io_desc[] __initdata = {
+#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
+static struct map_desc omap7xx_io_desc[] __initdata = {
{
.virtual = OMAP1_IO_VIRT,
.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
.length = OMAP1_IO_SIZE,
.type = MT_DEVICE
- }
-};
-
-#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
-static struct map_desc omap7xx_io_desc[] __initdata = {
+ },
{
.virtual = OMAP7XX_DSP_BASE,
.pfn = __phys_to_pfn(OMAP7XX_DSP_START),
#ifdef CONFIG_ARCH_OMAP15XX
static struct map_desc omap1510_io_desc[] __initdata = {
{
+ .virtual = OMAP1_IO_VIRT,
+ .pfn = __phys_to_pfn(OMAP1_IO_PHYS),
+ .length = OMAP1_IO_SIZE,
+ .type = MT_DEVICE
+ },
+ {
.virtual = OMAP1510_DSP_BASE,
.pfn = __phys_to_pfn(OMAP1510_DSP_START),
.length = OMAP1510_DSP_SIZE,
#if defined(CONFIG_ARCH_OMAP16XX)
static struct map_desc omap16xx_io_desc[] __initdata = {
{
+ .virtual = OMAP1_IO_VIRT,
+ .pfn = __phys_to_pfn(OMAP1_IO_PHYS),
+ .length = OMAP1_IO_SIZE,
+ .type = MT_DEVICE
+ },
+ {
.virtual = OMAP16XX_DSP_BASE,
.pfn = __phys_to_pfn(OMAP16XX_DSP_START),
.length = OMAP16XX_DSP_SIZE,
};
#endif
-/*
- * Maps common IO regions for omap1
- */
-static void __init omap1_map_common_io(void)
-{
- iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
-}
-
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
void __init omap7xx_map_io(void)
{
- omap1_map_common_io();
iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
}
#endif
#ifdef CONFIG_ARCH_OMAP15XX
void __init omap15xx_map_io(void)
{
- omap1_map_common_io();
iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
}
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
void __init omap16xx_map_io(void)
{
- omap1_map_common_io();
iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
}
#endif
#define OMAP1610_MCBSP2_BASE 0xfffb1000
#define OMAP1610_MCBSP3_BASE 0xe1017000
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
struct resource omap7xx_mcbsp_res[][6] = {
{
{
};
#define OMAP7XX_MCBSP_RES_SZ ARRAY_SIZE(omap7xx_mcbsp_res[1])
#define OMAP7XX_MCBSP_COUNT ARRAY_SIZE(omap7xx_mcbsp_res)
-#else
-#define omap7xx_mcbsp_res_0 NULL
-#define omap7xx_mcbsp_pdata NULL
-#define OMAP7XX_MCBSP_RES_SZ 0
-#define OMAP7XX_MCBSP_COUNT 0
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
struct resource omap15xx_mcbsp_res[][6] = {
{
{
};
#define OMAP15XX_MCBSP_RES_SZ ARRAY_SIZE(omap15xx_mcbsp_res[1])
#define OMAP15XX_MCBSP_COUNT ARRAY_SIZE(omap15xx_mcbsp_res)
-#else
-#define omap15xx_mcbsp_res_0 NULL
-#define omap15xx_mcbsp_pdata NULL
-#define OMAP15XX_MCBSP_RES_SZ 0
-#define OMAP15XX_MCBSP_COUNT 0
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
struct resource omap16xx_mcbsp_res[][6] = {
{
{
};
#define OMAP16XX_MCBSP_RES_SZ ARRAY_SIZE(omap16xx_mcbsp_res[1])
#define OMAP16XX_MCBSP_COUNT ARRAY_SIZE(omap16xx_mcbsp_res)
-#else
-#define omap16xx_mcbsp_res_0 NULL
-#define omap16xx_mcbsp_pdata NULL
-#define OMAP16XX_MCBSP_RES_SZ 0
-#define OMAP16XX_MCBSP_COUNT 0
-#endif
static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
struct omap_mcbsp_platform_data *config, int size)
#define OMAP7XX_IDLECT3 0xfffece24
#define OMAP7XX_IDLE_LOOP_REQUEST 0x0C00
-#if !defined(CONFIG_ARCH_OMAP730) && \
- !defined(CONFIG_ARCH_OMAP850) && \
- !defined(CONFIG_ARCH_OMAP15XX) && \
- !defined(CONFIG_ARCH_OMAP16XX)
-#warning "Power management for this processor not implemented yet"
-#endif
-
#ifndef __ASSEMBLER__
#include <linux/clk.h>
config MACH_PXA3XX_DT
bool "Support PXA3xx platforms from device tree"
select CPU_PXA300
+ select CPU_PXA310
+ select CPU_PXA320
select PINCTRL
select POWER_SUPPLY
select PXA3xx
};
&usb {
- phys = <&usb2_phy1>;
- phy-names = "usb2-phy1";
-};
-
-&usb2_phy0 {
- status = "disabled";
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
};
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c3 {
status = "okay";
- i2c-switch@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9540";
#address-cells = <1>;
#size-cells = <0>;
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- pca9547@75 {
+ i2c-mux@75 {
compatible = "nxp,pca9547";
reg = <0x75>;
#address-cells = <1>;
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;
&ecspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_espi2>;
- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
status = "okay";
eeprom@0 {
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
- MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41
>;
};
compatible = "rohm,bd71847";
reg = <0x4b>;
#clock-cells = <0>;
- clocks = <&clk_xtal32k 0>;
+ clocks = <&clk_xtal32k>;
clock-output-names = "clk-32k-out";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
pinctrl-0 = <&pinctrl_i2c3>;
status = "okay";
- i2cmux@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9540";
reg = <0x70>;
#address-cells = <1>;
&usbotg2 {
dr_mode = "host";
vbus-supply = <&reg_usb2_vbus>;
+ over-current-active-low;
status = "okay";
};
simple-audio-card,bitclock-master = <&dailink_master>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "imx8mm-wm8904";
simple-audio-card,routing =
"Headphone Jack", "HPOUTL",
simple-audio-card,bitclock-master = <&dailink_master>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "imx8mm-nau8822";
simple-audio-card,routing =
"Headphones", "LHP",
pcie0_refclk: pcie0-refclk {
compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <100000000>;
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
};
reg_can1_stby: regulator-can1-stby {
regulators {
buck1: BUCK1 {
- regulator-compatible = "BUCK1";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <2187500>;
regulator-boot-on;
};
buck2: BUCK2 {
- regulator-compatible = "BUCK2";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <2187500>;
regulator-boot-on;
};
buck4: BUCK4 {
- regulator-compatible = "BUCK4";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
};
buck5: BUCK5 {
- regulator-compatible = "BUCK5";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
};
buck6: BUCK6 {
- regulator-compatible = "BUCK6";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
};
ldo1: LDO1 {
- regulator-compatible = "LDO1";
regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
};
ldo2: LDO2 {
- regulator-compatible = "LDO2";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1150000>;
regulator-boot-on;
};
ldo3: LDO3 {
- regulator-compatible = "LDO3";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
};
ldo4: LDO4 {
- regulator-compatible = "LDO4";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <3300000>;
};
ldo5: LDO5 {
- regulator-compatible = "LDO5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
compatible = "fsl,imx8mp-gpc";
reg = <0x303a0000 0x1000>;
interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <3>;
reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
};
- pgc_hsiomix: power-domains@17 {
+ pgc_hsiomix: power-domain@17 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
reg = <0x32f10100 0x8>,
<0x381f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
usb_dwc3_0: usb@38100000 {
compatible = "snps,dwc3";
reg = <0x38100000 0x10000>;
- clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy0>, <&usb3_phy0>;
reg = <0x32f10108 0x8>,
<0x382f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
usb_dwc3_1: usb@38200000 {
compatible = "snps,dwc3";
reg = <0x38200000 0x10000>;
- clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy1>, <&usb3_phy1>;
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
- i2cmux@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9546";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1_pca9546>;
pinctrl-0 = <&pinctrl_i2c4>;
status = "okay";
- pca9546: i2cmux@70 {
+ pca9546: i2c-mux@70 {
compatible = "nxp,pca9546";
reg = <0x70>;
#address-cells = <1>;
bus-width = <4>;
non-removable;
no-sd;
- no-emmc;
+ no-mmc;
status = "okay";
brcmf: wifi@1 {
cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
bus-width = <4>;
no-sdio;
- no-emmc;
+ no-mmc;
disable-wp;
status = "okay";
};
pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>;
status = "okay";
- i2c-switch@71 {
+ i2c-mux@71 {
compatible = "nxp,pca9646", "nxp,pca9546";
#address-cells = <1>;
#size-cells = <0>;
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
- MX93_PAD_SD1_CLK__USDHC1_CLK 0x17fe
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
MX93_PAD_SD1_CMD__USDHC1_CMD 0x13fe
MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
- MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17fe
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
MX93_PAD_SD2_CMD__USDHC2_CMD 0x13fe
MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
uart1: serial@12100 {
compatible = "snps,dw-apb-uart";
- reg = <0x11000 0x100>;
+ reg = <0x12100 0x100>;
reg-shift = <2>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <1>;
* Copyright (c) 2015, LGE Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
+ * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
*/
/dts-v1/;
reg = <0 0x03400000 0 0x1200000>;
no-map;
};
+
+ removed_region: reserved@5000000 {
+ reg = <0 0x05000000 0 0x2200000>;
+ no-map;
+ };
};
};
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/gpio-keys.h>
+/delete-node/ &adsp_mem;
+/delete-node/ &audio_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &peripheral_region;
+/delete-node/ &rmtfs_mem;
+
/ {
model = "Xiaomi Mi 4C";
compatible = "xiaomi,libra", "qcom,msm8992";
#size-cells = <2>;
ranges;
- /* This is for getting crash logs using Android downstream kernels */
- ramoops@dfc00000 {
- compatible = "ramoops";
- reg = <0x0 0xdfc00000 0x0 0x40000>;
- console-size = <0x10000>;
- record-size = <0x10000>;
- ftrace-size = <0x10000>;
- pmsg-size = <0x20000>;
+ memory_hole: hole@6400000 {
+ reg = <0 0x06400000 0 0x600000>;
+ no-map;
+ };
+
+ memory_hole2: hole2@6c00000 {
+ reg = <0 0x06c00000 0 0x2400000>;
+ no-map;
+ };
+
+ mpss_mem: mpss@9000000 {
+ reg = <0 0x09000000 0 0x5a00000>;
+ no-map;
+ };
+
+ tzapp: tzapp@ea00000 {
+ reg = <0 0x0ea00000 0 0x1900000>;
+ no-map;
+ };
+
+ mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
+ reg = <0 0xca0b0000 0 0x10000>;
+ no-map;
+ };
+
+ rmtfs_mem: rmtfs@ca100000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0xca100000 0 0x180000>;
+ no-map;
+
+ qcom,client-id = <1>;
};
- modem_region: modem_region@9000000 {
- reg = <0x0 0x9000000 0x0 0x5a00000>;
+ audio_mem: audio@cb000000 {
+ reg = <0 0xcb000000 0 0x400000>;
+ no-map;
+ };
+
+ qseecom_mem: qseecom@cb400000 {
+ reg = <0 0xcb400000 0 0x1c00000>;
+ no-map;
+ };
+
+ adsp_rfsa_mem: adsp-rfsa@cd000000 {
+ reg = <0 0xcd000000 0 0x10000>;
no-map;
};
- tzapp: modem_region@ea00000 {
- reg = <0x0 0xea00000 0x0 0x1900000>;
+ sensor_rfsa_mem: sensor-rfsa@cd010000 {
+ reg = <0 0xcd010000 0 0x10000>;
no-map;
};
+
+ ramoops@dfc00000 {
+ compatible = "ramoops";
+ reg = <0 0xdfc00000 0 0x40000>;
+ console-size = <0x10000>;
+ record-size = <0x10000>;
+ ftrace-size = <0x10000>;
+ pmsg-size = <0x20000>;
+ };
};
};
status = "okay";
};
-&peripheral_region {
- reg = <0x0 0x7400000 0x0 0x1c00000>;
- no-map;
-};
-
&pm8994_spmi_regulators {
VDD_APC0: s8 {
regulator-min-microvolt = <680000>;
compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
};
-&tcsr_mutex {
- compatible = "qcom,sfpb-mutex";
-};
-
&timer {
interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
#include "msm8994.dtsi"
-/* Angler's firmware does not report where the memory is allocated */
-/delete-node/ &cont_splash_mem;
-
/ {
model = "Huawei Nexus 6P";
compatible = "huawei,angler", "qcom,msm8994";
chosen {
stdout-path = "serial0:115200n8";
};
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ tzapp_mem: tzapp@4800000 {
+ reg = <0 0x04800000 0 0x1900000>;
+ no-map;
+ };
+
+ removed_region: reserved@6300000 {
+ reg = <0 0x06300000 0 0xD00000>;
+ no-map;
+ };
+ };
};
&blsp1_uart2 {
#include <dt-bindings/interconnect/qcom,sc8280xp.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/thermal/thermal.h>
<0>,
<0>,
<0>,
- <&usb_0_ssphy>,
+ <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
- <&usb_1_ssphy>,
+ <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
<0>,
<0>,
<0>,
};
};
- usb_0_qmpphy: phy-wrapper@88ec000 {
+ usb_0_qmpphy: phy@88eb000 {
compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
- reg = <0 0x088ec000 0 0x1e4>,
- <0 0x088eb000 0 0x40>,
- <0 0x088ed000 0 0x1c8>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x088eb000 0 0x4000>;
clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB4_EUD_CLKREF_CLK>,
- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
- clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
- <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- power-domains = <&gcc USB30_PRIM_GDSC>;
+ #clock-cells = <1>;
+ #phy-cells = <1>;
status = "disabled";
-
- usb_0_ssphy: usb3-phy@88eb400 {
- reg = <0 0x088eb400 0 0x100>,
- <0 0x088eb600 0 0x3ec>,
- <0 0x088ec400 0 0x364>,
- <0 0x088eba00 0 0x100>,
- <0 0x088ebc00 0 0x3ec>,
- <0 0x088ec200 0 0x18>;
- #phy-cells = <0>;
- #clock-cells = <0>;
- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb0_phy_pipe_clk_src";
- };
};
usb_1_hsphy: phy@8902000 {
status = "disabled";
};
- usb_1_qmpphy: phy-wrapper@8904000 {
+ usb_1_qmpphy: phy@8903000 {
compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
- reg = <0 0x08904000 0 0x1e4>,
- <0 0x08903000 0 0x40>,
- <0 0x08905000 0 0x1c8>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x08903000 0 0x4000>;
clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB4_CLKREF_CLK>,
- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
- clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+ clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+ power-domains = <&gcc USB30_SEC_GDSC>;
resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
<&gcc GCC_USB4_1_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- power-domains = <&gcc USB30_SEC_GDSC>;
+ #clock-cells = <1>;
+ #phy-cells = <1>;
status = "disabled";
-
- usb_1_ssphy: usb3-phy@8903400 {
- reg = <0 0x08903400 0 0x100>,
- <0 0x08903600 0 0x3ec>,
- <0 0x08904400 0 0x364>,
- <0 0x08903a00 0 0x100>,
- <0 0x08903c00 0 0x3ec>,
- <0 0x08904200 0 0x18>;
- #phy-cells = <0>;
- #clock-cells = <0>;
- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb1_phy_pipe_clk_src";
- };
};
pmu@9091000 {
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x820 0x0>;
- phys = <&usb_0_hsphy>, <&usb_0_ssphy>;
+ phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
};
};
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x860 0x0>;
- phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
};
};
exit-latency-us = <6562>;
min-residency-us = <9987>;
local-timer-stop;
- status = "disabled";
};
};
};
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "core", "xo";
resets = <&gcc GCC_SDCC2_BCR>;
- interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
+ interconnects = <&aggre2_noc MASTER_SDCC_2 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_SDCC_2>;
interconnect-names = "sdhc-ddr","cpu-sdhc";
iommus = <&apps_smmu 0x4a0 0x0>;
power-domains = <&rpmhpd SM8350_CX>;
})
extern spinlock_t efi_rt_lock;
+extern u64 *efi_rt_stack_top;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+/*
+ * efi_rt_stack_top[-1] contains the value the stack pointer had before
+ * switching to the EFI runtime stack.
+ */
+#define current_in_efi() \
+ (!preemptible() && efi_rt_stack_top != NULL && \
+ on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
+
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/*
#define stackinfo_get_sdei_critical() stackinfo_get_unknown()
#endif
+#ifdef CONFIG_EFI
+extern u64 *efi_rt_stack_top;
+
+static inline struct stack_info stackinfo_get_efi(void)
+{
+ unsigned long high = (u64)efi_rt_stack_top;
+ unsigned long low = high - THREAD_SIZE;
+
+ return (struct stack_info) {
+ .low = low,
+ .high = high,
+ };
+}
+#endif
+
#endif /* __ASM_STACKTRACE_H */
mov x4, x6
blr x8
+ mov x16, sp
mov sp, x29
+ str xzr, [x16, #8] // clear recorded task SP value
+
ldp x1, x2, [sp, #16]
cmp x2, x18
ldp x29, x30, [sp], #112
SYM_CODE_START(__efi_rt_asm_recover)
mov sp, x30
+ ldr_l x16, efi_rt_stack_top // clear recorded task SP value
+ str xzr, [x16, #-8]
+
ldp x19, x20, [sp, #32]
ldp x21, x22, [sp, #48]
ldp x23, x24, [sp, #64]
#include <linux/init.h>
#include <asm/efi.h>
+#include <asm/stacktrace.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
/* Check whether the exception occurred while running the firmware */
- if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
+ if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
return false;
pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/kernel.h>
+#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
+#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
: stackinfo_get_unknown(); \
})
+#define STACKINFO_EFI \
+ ({ \
+ ((task == current) && current_in_efi()) \
+ ? stackinfo_get_efi() \
+ : stackinfo_get_unknown(); \
+ })
+
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
STACKINFO_SDEI(normal),
STACKINFO_SDEI(critical),
#endif
+#ifdef CONFIG_EFI
+ STACKINFO_EFI,
+#endif
};
struct unwind_state state = {
.stacks = stacks,
/* uaccess failed, don't leave stale tags */
if (num_tags != MTE_GRANULES_PER_PAGE)
- mte_clear_page_tags(page);
+ mte_clear_page_tags(maddr);
set_page_mte_tagged(page);
kvm_release_pfn_dirty(pfn);
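The one-line fix above is subtle: mte_clear_page_tags() takes a mapped virtual
address (a plain void *), so passing the struct page pointer compiled without
complaint but cleared tags at the wrong address; maddr is the page's kernel
mapping obtained earlier in the function.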
* The deactivation of the doorbell interrupt will trigger the
* unmapping of the associated vPE.
*/
-static void unmap_all_vpes(struct vgic_dist *dist)
+static void unmap_all_vpes(struct kvm *kvm)
{
- struct irq_desc *desc;
+ struct vgic_dist *dist = &kvm->arch.vgic;
int i;
- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
- }
+ for (i = 0; i < dist->its_vm.nr_vpes; i++)
+ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}
-static void map_all_vpes(struct vgic_dist *dist)
+static void map_all_vpes(struct kvm *kvm)
{
- struct irq_desc *desc;
+ struct vgic_dist *dist = &kvm->arch.vgic;
int i;
- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
- }
+ for (i = 0; i < dist->its_vm.nr_vpes; i++)
+ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
+ dist->its_vm.vpes[i]->irq));
}
/**
* and enabling of the doorbells have already been done.
*/
if (kvm_vgic_global_state.has_gicv4_1) {
- unmap_all_vpes(dist);
+ unmap_all_vpes(kvm);
vlpi_avail = true;
}
out:
if (vlpi_avail)
- map_all_vpes(dist);
+ map_all_vpes(kvm);
return ret;
}
*val = !!(*ptr & mask);
}
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+{
+ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
+}
+
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
irq_flags &= ~IRQ_NOAUTOEN;
irq_set_status_flags(irq, irq_flags);
- ret = request_irq(irq, vgic_v4_doorbell_handler,
- 0, "vcpu", vcpu);
+ ret = vgic_v4_request_vpe_irq(vcpu, irq);
if (ret) {
kvm_err("failed to allocate vcpu IRQ%d\n", irq);
/*
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
#endif
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */
<0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */
- <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */
+ <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x10000000>, /* mem */
<0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */
num-lanes = <0x8>;
interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
break;
case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ case INTEL_FAM6_EMERALDRAPIDS_X:
pmem = true;
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
static void disable_freq_invariance_workfn(struct work_struct *work)
{
+ int cpu;
+
static_branch_disable(&arch_scale_freq_key);
+
+ /*
+ * Set arch_freq_scale to a default value on all cpus
+ * This negates the effect of scaling
+ */
+ for_each_possible_cpu(cpu)
+ per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
}
static DECLARE_WORK(disable_freq_invariance_work,
{
u32 ar;
- if (var->unusable || !var->present)
- ar = 1 << 16;
- else {
- ar = var->type & 15;
- ar |= (var->s & 1) << 4;
- ar |= (var->dpl & 3) << 5;
- ar |= (var->present & 1) << 7;
- ar |= (var->avl & 1) << 12;
- ar |= (var->l & 1) << 13;
- ar |= (var->db & 1) << 14;
- ar |= (var->g & 1) << 15;
- }
+ ar = var->type & 15;
+ ar |= (var->s & 1) << 4;
+ ar |= (var->dpl & 3) << 5;
+ ar |= (var->present & 1) << 7;
+ ar |= (var->avl & 1) << 12;
+ ar |= (var->l & 1) << 13;
+ ar |= (var->db & 1) << 14;
+ ar |= (var->g & 1) << 15;
+ ar |= (var->unusable || !var->present) << 16;
return ar;
}
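A quick worked check of the encoding: a canonical 64-bit code segment
(type = 11, S = 1, DPL = 0, P = 1, L = 1, G = 1, others 0) packs to 0xA09B,
and an unusable segment now additionally sets bit 16 rather than collapsing
the whole field to 1 << 16.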
static void bfqg_get(struct bfq_group *bfqg)
{
- bfqg->ref++;
+ refcount_inc(&bfqg->ref);
}
static void bfqg_put(struct bfq_group *bfqg)
{
- bfqg->ref--;
-
- if (bfqg->ref == 0)
+ if (refcount_dec_and_test(&bfqg->ref))
kfree(bfqg);
}
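The conversion above replaces a plain integer with refcount_t, which is atomic
and saturates on overflow/underflow instead of wrapping silently. A hedged,
self-contained sketch of the same lifecycle (names are hypothetical, not bfq's):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->ref, 1);	/* the initial reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->ref);			/* warns and saturates on misuse */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))	/* true when the last ref drops */
		kfree(o);
}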
}
/* see comments in bfq_bic_update_cgroup for why refcounting */
- bfqg_get(bfqg);
+ refcount_set(&bfqg->ref, 1);
return &bfqg->pd;
}
char blkg_path[128];
/* reference counter (see comments in bfq_bic_update_cgroup) */
- int ref;
+ refcount_t ref;
/* Is bfq_group still online? */
bool online;
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
+ if (pol->pd_online_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_online_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
struct request *rq;
+ enum hctx_type type, hctx_type;
if (!plug)
return NULL;
return NULL;
}
- if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+ type = blk_mq_get_hctx_type((*bio)->bi_opf);
+ hctx_type = rq->mq_hctx->type;
+ if (type != hctx_type &&
+ !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
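The relaxed check above reflects how bios map to hardware-queue types:
blk_mq_get_hctx_type() classifies every read as HCTX_TYPE_READ, but on queues
without dedicated read queues those reads are serviced by the default queues
anyway, so a plugged request on an HCTX_TYPE_DEFAULT hctx can legitimately be
reused for a read bio.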
{
struct tty_struct *tty = in_synth->dev;
+ if (tty == NULL)
+ return;
+
tty_lock(tty);
if (tty->ops->close)
efi_status_t status;
struct prm_context_buffer context;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
+ return AE_NO_HANDLER;
+ }
+
/*
* The returned acpi_status will always be AE_OK. Error values will be
* saved in the first byte of the PRM message buffer to be used by ASL.
pr_info("PRM: found %u modules\n", mc);
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_err("PRM: EFI runtime services unavailable\n");
+ return;
+ }
+
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_RT,
&acpi_platformrt_space_handler,
},
{
.callback = video_detect_force_native,
+ /* Acer Aspire 4810T */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Acer Aspire 5738z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
+ struct fwnode_handle *ep, *port_parent = NULL;
const struct fwnode_handle *parent;
- struct fwnode_handle *ep;
/*
* If this function is in a loop and the previous iteration returned
* an endpoint from fwnode->secondary, then we need to use the secondary
* as parent rather than @fwnode.
*/
- if (prev)
- parent = fwnode_graph_get_port_parent(prev);
- else
+ if (prev) {
+ port_parent = fwnode_graph_get_port_parent(prev);
+ parent = port_parent;
+ } else {
parent = fwnode;
+ }
if (IS_ERR_OR_NULL(parent))
return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
if (ep)
- return ep;
+ goto out_put_port_parent;

- return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+
+out_put_port_parent:
+ fwnode_handle_put(port_parent);
+ return ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
calltime = ktime_get();
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
- pdev = &sync_dev[sync_id];
+ pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,
struct bio *split;
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
goto out_alloc;
}
- ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+ ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
GFP_KERNEL);
if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
#define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
-#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
+#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */
#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
- goto out;
+ chan->client_count++;
+ return 0;
}
if (!try_module_get(owner))
goto err_out;
}
+ chan->client_count++;
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
-out:
- chan->client_count++;
return 0;
err_out:
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
spin_unlock(&ie->list_lock);
list_for_each_entry_safe(desc, itr, &flist, list) {
+ struct dma_async_tx_descriptor *tx;
+
list_del(&desc->list);
ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+ /*
+ * wq is being disabled. Any remaining descriptors are
+ * likely to be stuck and can be dropped. callback could
+ * point to code that is no longer accessible, for example
+ * if dmatest module has been unloaded.
+ */
+ tx = &desc->txd;
+ tx->callback = NULL;
+ tx->callback_result = NULL;
idxd_dma_complete_txd(desc, ctype, true);
}
}
err_irq:
idxd_wq_unmap_portal(wq);
err_map_portal:
- rc = idxd_wq_disable(wq, false);
- if (rc < 0)
+ if (idxd_wq_disable(wq, false))
dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
return rc;
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
- idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
+ idxd_wq_free_resources(wq);
percpu_ref_exit(&wq->wq_active);
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
- goto err_desc_out;
+ goto err_bd_out;
return desc;
+err_bd_out:
+ sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:
}
}
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
{
struct fwnode_handle *fwnode = dev_fwnode(d->dev);
struct ldma_port *p;
p->ldev = d;
}
- ret = ldma_cfg_init(d);
- if (ret)
- return ret;
-
dma_dev->dev = &pdev->dev;
ch_mask = (unsigned long)d->channels_mask;
ldma_dma_init_v3X(j, d);
}
+ ret = ldma_parse_dt(d);
+ if (ret)
+ return ret;
+
dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
dma_dev->device_free_chan_resources = ldma_free_chan_resources;
dma_dev->device_terminate_all = ldma_terminate_all;
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail;
+ unsigned long flags;
if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC;
}
- mutex_lock(&cmd_q->q_mutex);
+ spin_lock_irqsave(&cmd_q->q_lock, flags);
/* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32);
/* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q);
- mutex_unlock(&cmd_q->q_mutex);
+ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
return 0;
}
cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool;
- mutex_init(&cmd_q->q_mutex);
+ spin_lock_init(&cmd_q->q_lock);
/* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
- struct mutex q_mutex ____cacheline_aligned;
+ spinlock_t q_lock ____cacheline_aligned;
unsigned int qidx;
unsigned int qsize;
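Context for the mutex-to-spinlock conversion above: descriptor submission can
happen from atomic context (for example from a completion callback run in
interrupt context), where a mutex, which may sleep, must not be taken;
spin_lock_irqsave() is safe there.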
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
if (spi->cmd == SPI_RX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
} else if (spi->cmd == SPI_TX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
} else { /* SPI_DUPLEX */
return err;
}
+ vchan_terminate_vdesc(&tdc->dma_desc->vd);
tegra_dma_disable(tdc);
tdc->dma_desc = NULL;
}
int ret;
/* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+ tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
if (uc->desc->dir == DMA_DEV_TO_MEM) {
udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
} else {
udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- if (!uc->bchan)
+ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
}
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
- if (err < 0)
+ if (err < 0) {
+ of_node_put(child);
goto error;
+ }
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
* whole one second to save timers firing all over the period
* between integral seconds
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
- /*
- * enable workq processing on this instance,
- * default = 1000 msec
- */
- edac_device_workq_setup(edac_dev, 1000);
+ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
- struct llcc_drv_data *drv = edev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
- edev_ctl->pvt_info = llcc_driv_data;
rc = edac_device_add_device(edev_ctl);
if (rc)
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
+ /* Clear any stale status */
+ xfer->hdr.status = SCMI_SUCCESS;
xfer->state = SCMI_XFER_SENT_OK;
/*
* Even though spinlocking is not needed here since no race is possible
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
xfer->hdr.status = ioread32(shmem->msg_payload);
/* Skip the length of header and status in shmem area i.e 8 bytes */
- xfer->rx.len = min_t(size_t, xfer->rx.len,
- ioread32(&shmem->length) - 8);
+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
size_t max_len, struct scmi_xfer *xfer)
{
+ size_t len = ioread32(&shmem->length);
+
/* Skip only the length of header in shmem area i.e 4 bytes */
- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
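The guards above matter because the length field is read from shared memory
that the firmware controls: a reported length smaller than the header would
underflow the subtraction, and since the result feeds a size_t rx length, it
would wrap to a huge value instead of clamping to zero.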
vioch->shutdown_done = &vioch_shutdown_done;
- virtio_break_device(vioch->vqueue->vdev);
if (!vioch->is_rx && vioch->deferred_tx_wq)
/* Cannot be kicked anymore after this...*/
vioch->deferred_tx_wq = NULL;
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
+ /*
+ * Break device to inhibit further traffic flowing while shutting down
+ * the channels: doing it later holding vioch->lock creates unsafe
+ * locking dependency chains as reported by LOCKDEP.
+ */
+ virtio_break_device(vioch->vqueue->vdev);
scmi_vio_channel_cleanup_sync(vioch);
scmi_free_channel(cinfo, data, id);
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables are have the following attributes */
- *attr = EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS;
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
+ unsigned long flags;
u32 bit, val;
u32 gpio_idx = d->hwirq;
int edge;
return -EINVAL;
}
+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
if (GPIO_EDGE_SEL >= 0) {
val = readl(port->base + GPIO_EDGE_SEL);
if (edge == GPIO_INT_BOTH_EDGES)
writel(1 << gpio_idx, port->base + GPIO_ISR);
port->pad_type[gpio_idx] = type;
- return 0;
+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+
+ return port->gc.direction_input(&port->gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
+ unsigned long flags;
u32 bit, val;
int edge;
+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
val = readl(reg);
return;
}
writel(val | (edge << (bit << 1)), reg);
+
+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
/* handle 32 interrupts in one status register */
}
static bool acpi_gpio_irq_is_wake(struct device *parent,
- struct acpi_resource_gpio *agpio)
+ const struct acpi_resource_gpio *agpio)
{
unsigned int pin = agpio->pin_table[0];
lookup->info.pin_config = agpio->pin_config;
lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
/*
* Polarity and triggering are only specified for GpioInt
.ignore_interrupt = "AMDI0030:00@18",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
{} /* Terminating entry */
};
return amdgpu_compute_multipipe == 1;
}
+ if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+ return true;
+
/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
!--id_mgr->reserved_use_count) {
/* give the reserved ID back to normal round robin */
list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
+ id_mgr->reserved = NULL;
}
vm->reserved_vmid[vmhub] = false;
mutex_unlock(&id_mgr->lock);
struct dma_fence *f;
unsigned i;
- /* use sched fence if available */
- f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
+ /* Check if any fences where initialized */
+ if (job->base.s_fence && job->base.s_fence->finished.ops)
+ f = &job->base.s_fence->finished;
+ else if (job->hw_fence.ops)
+ f = &job->hw_fence;
+ else
+ f = NULL;
+
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- case IP_VERSION(11, 0, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.me.num_me = 1;
+ adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.mec.num_mec = 1;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 4;
+ break;
default:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
- case IP_VERSION(3, 1, 4):
- case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
adev->dm.vblank_control_workqueue = NULL;
}
- for (i = 0; i < adev->dm.display_indexes_num; i++) {
- drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
- }
-
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- stream->output_color_space = get_output_color_space(timing_out);
-
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
adjust_colour_depth_from_display_info(timing_out, info);
}
}
+
+ stream->output_color_space = get_output_color_space(timing_out);
}
static void fill_audio_info(struct audio_info *audio_info,
goto fail;
}
- if (dm_old_con_state->abm_level !=
- dm_new_con_state->abm_level)
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
new_crtc_state->connectors_changed = true;
}
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
- kfree(encoder);
}
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
- { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
- 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+ { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
+ 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
int ret = 0;
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
+ struct amdgpu_device *adev = smu->adev;
ret = smu_cmn_get_metrics_table(smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = (metrics->CurrentSocketPower << 8) / 1000;
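+ /*
+ * Newer firmware reports CurrentSocketPower in watts,
+ * older firmware reported it in milliwatts.
+ */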
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
+ *value = metrics->CurrentSocketPower << 8;
+ else
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = (metrics->GfxTemperature / 100) *
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/pci.h>
#include <linux/sysrq.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
+
+ /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+
return 0;
}
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < 1) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);
I915_SHRINK_ACTIVE);
i915_vma_unpin(vma);
if (err)
- goto out_put;
+ goto out_wf;
/*
* Now that the pages are *unpinned* shrinking should invoke
pr_err("unexpected pages mismatch, should_swap=%s\n",
str_yes_no(should_swap));
err = -EINVAL;
- goto out_put;
+ goto out_wf;
}
if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
pr_err("unexpected residual page-size bits, should_swap=%s\n",
str_yes_no(should_swap));
err = -EINVAL;
- goto out_put;
+ goto out_wf;
}
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_put;
+ goto out_wf;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
#define RC_OP_FLUSH_ENABLE (1 << 0)
#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
+#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE REG_BIT(6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE REG_BIT(6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE REG_BIT(1)
#define GEN7_GT_MODE _MMIO(0x7008)
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN8_L3CNTLREG _MMIO(0x7034)
#define GEN8_ERRDETBCTRL (1 << 9)
+#define PSS_MODE2 _MMIO(0x703c)
+#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
/* Wa_14014947963:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
- IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+ IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
+ /* Wa_18018764978:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
+ IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+ wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+
/* Wa_15010599737:dg2 */
wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+
+ /* Wa_18019271663:dg2 */
+ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
- struct drm_i915_private *i915 = to_i915(dev);
-
intel_fbdev_restore_mode(dev);
- if (HAS_DISPLAY(i915))
- vga_switcheroo_process_delayed_switch();
+ vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
- .has_rc6p = 1, \
+ /* snb does support rc6p, but enabling it causes various issues */ \
+ .has_rc6p = 0, \
.has_rps = true, \
.dma_mask_size = 40, \
.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
+ if (!HAS_DISPLAY(i915)) {
+ dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
+ return;
+ }
if (state == VGA_SWITCHEROO_ON) {
drm_info(&i915->drm, "switched on\n");
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
return 0;
}
+static int adreno_system_suspend(struct device *dev);
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
- pm_runtime_force_suspend(dev);
+ WARN_ON_ONCE(adreno_system_suspend(dev));
gpu->funcs->destroy(gpu);
priv->gpu_pdev = NULL;
static void adreno_shutdown(struct platform_device *pdev)
{
- pm_runtime_force_suspend(&pdev->dev);
+ WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
static const struct of_device_id dt_match[] = {
/* Ensure string is null terminated: */
str[len] = '\0';
+ mutex_lock(&gpu->lock);
+
if (param == MSM_PARAM_COMM) {
paramp = &ctx->comm;
} else {
kfree(*paramp);
*paramp = str;
+ mutex_unlock(&gpu->lock);
+
return 0;
}
case MSM_PARAM_SYSPROF:
struct msm_file_private *ctx = submit->queue->ctx;
struct task_struct *task;
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
/* Note that kstrdup will return NULL if argument is NULL: */
*comm = kstrdup(ctx->comm, GFP_KERNEL);
*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
*/
int sysprof;
- /** comm: Overridden task comm, see MSM_PARAM_COMM */
+ /**
+ * comm: Overridden task comm, see MSM_PARAM_COMM
+ *
+ * Accessed under msm_gpu::lock
+ */
char *comm;
- /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
+ /**
+ * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ *
+ * Accessed under msm_gpu::lock
+ */
char *cmdline;
/**
config DRM_PANFROST
tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
depends on DRM
- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on MMU
select DRM_SCHED
select IOMMU_SUPPORT
bo->validated_shader = NULL;
}
+ mutex_destroy(&bo->madv_lock);
drm_gem_dma_free(&bo->base);
}
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
- int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
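+ /*
+ * Do not use drmm_mutex_init() here: it allocates drm-managed tracking
+ * data that is only released when the device goes away, which adds up
+ * for every BO created.
+ */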
- ret = drmm_mutex_init(dev, &bo->madv_lock);
- if (ret)
- return ERR_PTR(ret);
+ mutex_init(&bo->madv_lock);
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
+ unsigned int sg_delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
- biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+ sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
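+ /* stay in the current SGE if it still covers the next block, otherwise step to the next one */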
- if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+ if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+ biter->__sg_advance += sg_delta;
+ } else {
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
struct tid_group *grp,
unsigned int start, u16 count,
u32 *tidlist, unsigned int *tididx,
unsigned int *pmapped);
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
- struct tid_group **grp);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
+static void __clear_tid_node(struct hfi1_filedata *fd,
+ struct tid_rb_node *node);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static const struct mmu_interval_notifier_ops tid_mn_ops = {
.invalidate = tid_rb_invalidate,
};
+static const struct mmu_interval_notifier_ops tid_cover_ops = {
+ .invalidate = tid_cover_invalidate,
+};
/*
* Initialize context and file private data needed for Expected
tididx = 0, mapped, mapped_pages = 0;
u32 *tidlist = NULL;
struct tid_user_buf *tidbuf;
+ unsigned long mmu_seq = 0;
if (!PAGE_ALIGNED(tinfo->vaddr))
return -EINVAL;
+ if (tinfo->length == 0)
+ return -EINVAL;
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
return -ENOMEM;
+ mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
- kfree(tidbuf);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_release_mem;
+ }
+
+ if (fd->use_mn) {
+ ret = mmu_interval_notifier_insert(
+ &tidbuf->notifier, current->mm,
+ tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
+ &tid_cover_ops);
+ if (ret)
+ goto fail_release_mem;
+ mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
}
pinned = pin_rcv_pages(fd, tidbuf);
if (pinned <= 0) {
- kfree(tidbuf->psets);
- kfree(tidbuf);
- return pinned;
+ ret = (pinned < 0) ? pinned : -ENOSPC;
+ goto fail_unpin;
}
/* Find sets of physically contiguous pages */
tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
- /*
- * We don't need to access this under a lock since tid_used is per
- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
- * and hfi1_user_exp_rcv_setup() at the same time.
- */
+ /* Reserve the number of expected TIDs this request will use. */
spin_lock(&fd->tid_lock);
if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
pageset_count = fd->tid_limit - fd->tid_used;
else
pageset_count = tidbuf->n_psets;
+ fd->tid_used += pageset_count;
spin_unlock(&fd->tid_lock);
- if (!pageset_count)
- goto bail;
+ if (!pageset_count) {
+ ret = -ENOSPC;
+ goto fail_unreserve;
+ }
ngroups = pageset_count / dd->rcv_entries.group_size;
tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
if (!tidlist) {
ret = -ENOMEM;
- goto nomem;
+ goto fail_unreserve;
}
tididx = 0;
}
unlock:
mutex_unlock(&uctxt->exp_mutex);
-nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
- if (tididx) {
- spin_lock(&fd->tid_lock);
- fd->tid_used += tididx;
- spin_unlock(&fd->tid_lock);
- tinfo->tidcnt = tididx;
- tinfo->length = mapped_pages * PAGE_SIZE;
-
- if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
- tidlist, sizeof(tidlist[0]) * tididx)) {
- /*
- * On failure to copy to the user level, we need to undo
- * everything done so far so we don't leak resources.
- */
- tinfo->tidlist = (unsigned long)&tidlist;
- hfi1_user_exp_rcv_clear(fd, tinfo);
- tinfo->tidlist = 0;
- ret = -EFAULT;
- goto bail;
+
+ /* fail if nothing was programmed, and report -ENOSPC if no other error was set */
+ if (tididx == 0) {
+ if (ret >= 0)
+ ret = -ENOSPC;
+ goto fail_unreserve;
+ }
+
+ /* adjust reserved tid_used to actual count */
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count - tididx;
+ spin_unlock(&fd->tid_lock);
+
+ /* unpin all pages not covered by a TID */
+ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+ false);
+
+ if (fd->use_mn) {
+ /* check for an invalidate during setup */
+ bool fail = false;
+
+ mutex_lock(&tidbuf->cover_mutex);
+ fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
+ mutex_unlock(&tidbuf->cover_mutex);
+
+ if (fail) {
+ ret = -EBUSY;
+ goto fail_unprogram;
}
}
- /*
- * If not everything was mapped (due to insufficient RcvArray entries,
- * for example), unpin all unmapped pages so we can pin them nex time.
- */
- if (mapped_pages != pinned)
- unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
- (pinned - mapped_pages), false);
-bail:
+ tinfo->tidcnt = tididx;
+ tinfo->length = mapped_pages * PAGE_SIZE;
+
+ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+ tidlist, sizeof(tidlist[0]) * tididx)) {
+ ret = -EFAULT;
+ goto fail_unprogram;
+ }
+
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&tidbuf->notifier);
+ kfree(tidbuf->pages);
kfree(tidbuf->psets);
+ kfree(tidbuf);
kfree(tidlist);
+ return 0;
+
+fail_unprogram:
+ /* unprogram, unmap, and unpin all allocated TIDs */
+ tinfo->tidlist = (unsigned long)tidlist;
+ hfi1_user_exp_rcv_clear(fd, tinfo);
+ tinfo->tidlist = 0;
+ pinned = 0; /* nothing left to unpin */
+ pageset_count = 0; /* nothing left reserved */
+fail_unreserve:
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count;
+ spin_unlock(&fd->tid_lock);
+fail_unpin:
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&tidbuf->notifier);
+ if (pinned > 0)
+ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
kfree(tidbuf);
- return ret > 0 ? 0 : ret;
+ kfree(tidlist);
+ return ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
mutex_lock(&uctxt->exp_mutex);
for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
- ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
+ ret = unprogram_rcvarray(fd, tidinfo[tididx]);
if (ret) {
hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
ret);
}
node->fdata = fd;
+ mutex_init(&node->invalidate_mutex);
node->phys = page_to_phys(pages[0]);
node->npages = npages;
node->rcventry = rcventry;
&tid_mn_ops);
if (ret)
goto out_unmap;
- /*
- * FIXME: This is in the wrong order, the notifier should be
- * established before the pages are pinned by pin_rcv_pages.
- */
- mmu_interval_read_begin(&node->notifier);
}
fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
return -EFAULT;
}
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
- struct tid_group **grp)
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
- if (grp)
- *grp = node->grp;
-
if (fd->use_mn)
mmu_interval_notifier_remove(&node->notifier);
cacheless_tid_rb_remove(fd, node);
return 0;
}
-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
+ mutex_lock(&node->invalidate_mutex);
+ if (node->freed)
+ goto done;
+ node->freed = true;
+
trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
node->npages,
node->notifier.interval_tree.start, node->phys,
node->dma_addr);
- /*
- * Make sure device has seen the write before we unpin the
- * pages.
- */
+ /* Make sure device has seen the write before pages are unpinned */
hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
+done:
+ mutex_unlock(&node->invalidate_mutex);
+}
+
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+ __clear_tid_node(fd, node);
node->grp->used--;
node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
if (node->freed)
return true;
+ /* take action only if unmapping */
+ if (range->event != MMU_NOTIFY_UNMAP)
+ return true;
+
trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
node->notifier.interval_tree.start,
node->rcventry, node->npages, node->dma_addr);
- node->freed = true;
+
+ /* clear the hardware rcvarray entry */
+ __clear_tid_node(fdata, node);
spin_lock(&fdata->invalid_lock);
if (fdata->invalid_tid_idx < uctxt->expected_count) {
return true;
}
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct tid_user_buf *tidbuf =
+ container_of(mni, struct tid_user_buf, notifier);
+
+ /* take action only if unmapping */
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ mutex_lock(&tidbuf->cover_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ mutex_unlock(&tidbuf->cover_mutex);
+ }
+
+ return true;
+}
+
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
struct tid_rb_node *tnode)
{
};
struct tid_user_buf {
+ struct mmu_interval_notifier notifier;
+ struct mutex cover_mutex;
unsigned long vaddr;
unsigned long length;
unsigned int npages;
struct tid_rb_node {
struct mmu_interval_notifier notifier;
struct hfi1_filedata *fdata;
+ struct mutex invalidate_mutex; /* covers hw removal */
unsigned long phys;
struct tid_group *grp;
u32 rcventry;
RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
RXE_MIN_MR_INDEX = 0x00000001,
- RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE,
- RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
- RXE_MIN_MW_INDEX = 0x00010001,
- RXE_MAX_MW_INDEX = 0x00020000,
- RXE_MAX_MW = 0x00001000,
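+ /* split the index space evenly between MRs and MWs */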
+ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1,
+ RXE_MAX_MR = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
+ RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1,
+ RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_MW = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
RXE_MAX_PKT_PER_ACK = 64,
.size = sizeof(struct rxe_ucontext),
.elem_offset = offsetof(struct rxe_ucontext, elem),
.min_index = 1,
- .max_index = UINT_MAX,
- .max_elem = UINT_MAX,
+ .max_index = RXE_MAX_UCONTEXT,
+ .max_elem = RXE_MAX_UCONTEXT,
},
[RXE_TYPE_PD] = {
.name = "pd",
.size = sizeof(struct rxe_pd),
.elem_offset = offsetof(struct rxe_pd, elem),
.min_index = 1,
- .max_index = UINT_MAX,
- .max_elem = UINT_MAX,
+ .max_index = RXE_MAX_PD,
+ .max_elem = RXE_MAX_PD,
},
[RXE_TYPE_AH] = {
.name = "ah",
.elem_offset = offsetof(struct rxe_ah, elem),
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
- .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
+ .max_elem = RXE_MAX_AH,
},
[RXE_TYPE_SRQ] = {
.name = "srq",
.cleanup = rxe_srq_cleanup,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
- .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
+ .max_elem = RXE_MAX_SRQ,
},
[RXE_TYPE_QP] = {
.name = "qp",
.cleanup = rxe_qp_cleanup,
.min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX,
- .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
+ .max_elem = RXE_MAX_QP,
},
[RXE_TYPE_CQ] = {
.name = "cq",
.elem_offset = offsetof(struct rxe_cq, elem),
.cleanup = rxe_cq_cleanup,
.min_index = 1,
- .max_index = UINT_MAX,
- .max_elem = UINT_MAX,
+ .max_index = RXE_MAX_CQ,
+ .max_elem = RXE_MAX_CQ,
},
[RXE_TYPE_MR] = {
.name = "mr",
.cleanup = rxe_mr_cleanup,
.min_index = RXE_MIN_MR_INDEX,
.max_index = RXE_MAX_MR_INDEX,
- .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
+ .max_elem = RXE_MAX_MR,
},
[RXE_TYPE_MW] = {
.name = "mw",
.cleanup = rxe_mw_cleanup,
.min_index = RXE_MIN_MW_INDEX,
.max_index = RXE_MAX_MW_INDEX,
- .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
+ .max_elem = RXE_MAX_MW,
},
};
}
regmap_done:
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
"aggre0_noc_mpu_cfg"
};
+static const char * const bus_a2noc_clocks[] = {
+ "bus",
+ "bus_a",
+ "aggre2_ufs_axi",
+ "ufs_axi"
+};
+
static const u16 mas_a0noc_common_links[] = {
MSM8996_SLAVE_A0NOC_SNOC
};
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x9000,
+ .max_register = 0x6000,
.fast_io = true
};
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x7000,
+ .max_register = 0x5000,
.fast_io = true
};
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xa000,
+ .max_register = 0x7000,
.fast_io = true
};
.type = QCOM_ICC_NOC,
.nodes = a2noc_nodes,
.num_nodes = ARRAY_SIZE(a2noc_nodes),
+ .clocks = bus_a2noc_clocks,
+ .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
.regmap_cfg = &msm8996_a2noc_regmap_config
};
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x62000,
+ .max_register = 0x5a000,
.fast_io = true
};
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x20000,
+ .max_register = 0x1c000,
.fast_io = true
};
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
- static struct md_rdev *claim_rdev; /* just for claiming the bdev */
+ static struct md_rdev claim_rdev; /* just for claiming the bdev */
struct md_rdev *rdev;
sector_t size;
int err;
rdev->bdev = blkdev_get_by_dev(newdev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- super_format == -2 ? claim_rdev : rdev);
+ super_format == -2 ? &claim_rdev : rdev);
if (IS_ERR(rdev->bdev)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
- clk = devm_clk_get(&pdev->dev, "ddrck");
+ clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
if (IS_ERR(clk))
return PTR_ERR(clk);
- clk_prepare_enable(clk);
}
if (caps->has_mpddr_clk) {
- clk = devm_clk_get(&pdev->dev, "mpddr");
+ clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
if (IS_ERR(clk)) {
pr_err("AT91 RAMC: couldn't get mpddr clock\n");
return PTR_ERR(clk);
}
- clk_prepare_enable(clk);
}
return 0;
if (IS_ERR(devbus->base))
return PTR_ERR(devbus->base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- clk_prepare_enable(clk);
/*
* Obtain clock period in picoseconds,
}
}
- if (p->wait_pin > gpmc_nr_waitpins) {
+ if (p->wait_pin != GPMC_WAITPIN_INVALID &&
+ p->wait_pin > gpmc_nr_waitpins) {
pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
return -EINVAL;
}
#define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
#define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
-static void tegra186_mc_program_sid(struct tegra_mc *mc)
-{
- unsigned int i;
-
- for (i = 0; i < mc->soc->num_clients; i++) {
- const struct tegra_mc_client *client = &mc->soc->clients[i];
- u32 override, security;
-
- override = readl(mc->regs + client->regs.sid.override);
- security = readl(mc->regs + client->regs.sid.security);
-
- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
- client->name, override, security);
-
- dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
- client->name);
- writel(client->sid, mc->regs + client->regs.sid.override);
-
- override = readl(mc->regs + client->regs.sid.override);
- security = readl(mc->regs + client->regs.sid.security);
-
- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
- client->name, override, security);
- }
-}
-
static int tegra186_mc_probe(struct tegra_mc *mc)
{
struct platform_device *pdev = to_platform_device(mc->dev);
if (err < 0)
return err;
- tegra186_mc_program_sid(mc);
-
return 0;
}
of_platform_depopulate(mc->dev);
}
-static int tegra186_mc_resume(struct tegra_mc *mc)
-{
- tegra186_mc_program_sid(mc);
-
- return 0;
-}
-
#if IS_ENABLED(CONFIG_IOMMU_API)
static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
const struct tegra_mc_client *client,
const struct tegra_mc_ops tegra186_mc_ops = {
.probe = tegra186_mc_probe,
.remove = tegra186_mc_remove,
- .resume = tegra186_mc_resume,
.probe_device = tegra186_mc_probe_device,
.handle_irq = tegra30_mc_handle_irq,
};
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(map->phys, map->size,
- &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
+ &map->fl->cctx->perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
dma_buf_put(map->buf);
}
+ if (map->fl) {
+ spin_lock(&map->fl->lock);
+ list_del(&map->node);
+ spin_unlock(&map->fl->lock);
+ map->fl = NULL;
+ }
+
kfree(map);
}
kref_put(&map->refcount, fastrpc_free_map);
}
-static void fastrpc_map_get(struct fastrpc_map *map)
+static int fastrpc_map_get(struct fastrpc_map *map)
{
- if (map)
- kref_get(&map->refcount);
+ if (!map)
+ return -ENOENT;
+
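+ /* may race with fastrpc_free_map(): take a reference only while the map is still live */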
+ return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap)
+ struct fastrpc_map **ppmap, bool take_ref)
{
+ struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
+ int ret = -ENOENT;
- mutex_lock(&fl->mutex);
+ spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
- if (map->fd == fd) {
- *ppmap = map;
- mutex_unlock(&fl->mutex);
- return 0;
- }
- }
- mutex_unlock(&fl->mutex);
-
- return -ENOENT;
-}
+ if (map->fd != fd)
+ continue;
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap)
-{
- int ret = fastrpc_map_lookup(fl, fd, ppmap);
+ if (take_ref) {
+ ret = fastrpc_map_get(map);
+ if (ret) {
+ dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+ __func__, fd, ret);
+ break;
+ }
+ }
- if (!ret)
- fastrpc_map_get(*ppmap);
+ *ppmap = map;
+ ret = 0;
+ break;
+ }
+ spin_unlock(&fl->lock);
return ret;
}
struct fastrpc_map *map = NULL;
int err = 0;
- if (!fastrpc_map_find(fl, fd, ppmap))
+ if (!fastrpc_map_lookup(fl, fd, ppmap, true))
return 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
* If subsystem VMIDs are defined in DTSI, then do
* hyp_assign from HLOS to those VM(s)
*/
- unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
map->attr = attr;
- err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
+ err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
- if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+ if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
fastrpc_map_put(mmap);
}
/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
if (fl->cctx->vmcount) {
- unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
- (u64)fl->cctx->remote_heap->size, &perms,
+ (u64)fl->cctx->remote_heap->size,
+ &fl->cctx->perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
- &(fl->cctx->vmperms[0].vmid), &perm, 1);
+ &fl->cctx->perms, &perm, 1);
if (err)
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
fl->init_mem = NULL;
fastrpc_buf_free(imem);
err_alloc:
- if (map) {
- spin_lock(&fl->lock);
- list_del(&map->node);
- spin_unlock(&fl->lock);
- fastrpc_map_put(map);
- }
+ fastrpc_map_put(map);
err:
kfree(args);
fastrpc_context_put(ctx);
}
- list_for_each_entry_safe(map, m, &fl->maps, node) {
- list_del(&map->node);
+ list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
- }
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
struct qcom_scm_vmperm perm;
- int err = 0;
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(buf->phys, buf->size,
- &(fl->cctx->vmperms[0].vmid), &perm, 1);
+ &fl->cctx->perms, &perm, 1);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
- goto out;
+ goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
-out:
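+ /* unlink a client that was linked above if DMA setup failed */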
+ if (ret)
+ mei_cl_unlink(cl);
+notlinked:
mutex_unlock(&bus->device_lock);
if (ret)
return ERR_PTR(ret);
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
- goto out;
+ goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
}
out:
+ if (ret)
+ mei_cl_unlink(cl);
+notlinked:
mutex_unlock(&bus->device_lock);
return ret;
mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
- mei_cl_unlink(cldev->cl);
kfree(cldev->cl);
kfree(cldev);
}
#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
+#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
+
/*
* MEI HW Section
*/
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
bool exclusive_vectors;
- struct tasklet_struct datagram_tasklet;
- struct tasklet_struct bm_tasklet;
struct wait_queue_head inout_wq;
void *data_buffer;
* This function assumes that it has exclusive access to the data
* in register(s) for the duration of the call.
*/
-static void vmci_dispatch_dgs(unsigned long data)
+static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
{
- struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
u8 *dg_in_buffer = vmci_dev->data_buffer;
struct vmci_datagram *dg;
size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
* Scans the notification bitmap for raised flags, clears them
* and handles the notifications.
*/
-static void vmci_process_bitmap(unsigned long data)
+static void vmci_process_bitmap(struct vmci_guest_device *dev)
{
- struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
-
if (!dev->notification_bitmap) {
dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
return;
struct vmci_guest_device *dev = _dev;
/*
- * If we are using MSI-X with exclusive vectors then we simply schedule
- * the datagram tasklet, since we know the interrupt was meant for us.
+ * If we are using MSI-X with exclusive vectors then we simply call
+ * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
* Otherwise we must read the ICR to determine what to do.
*/
if (dev->exclusive_vectors) {
- tasklet_schedule(&dev->datagram_tasklet);
+ vmci_dispatch_dgs(dev);
} else {
unsigned int icr;
return IRQ_NONE;
if (icr & VMCI_ICR_DATAGRAM) {
- tasklet_schedule(&dev->datagram_tasklet);
+ vmci_dispatch_dgs(dev);
icr &= ~VMCI_ICR_DATAGRAM;
}
if (icr & VMCI_ICR_NOTIFICATION) {
- tasklet_schedule(&dev->bm_tasklet);
+ vmci_process_bitmap(dev);
icr &= ~VMCI_ICR_NOTIFICATION;
}
struct vmci_guest_device *dev = _dev;
/* For MSI-X we can just assume it was meant for us. */
- tasklet_schedule(&dev->bm_tasklet);
+ vmci_process_bitmap(dev);
return IRQ_HANDLED;
}
vmci_dev->iobase = iobase;
vmci_dev->mmio_base = mmio_base;
- tasklet_init(&vmci_dev->datagram_tasklet,
- vmci_dispatch_dgs, (unsigned long)vmci_dev);
- tasklet_init(&vmci_dev->bm_tasklet,
- vmci_process_bitmap, (unsigned long)vmci_dev);
init_waitqueue_head(&vmci_dev->inout_wq);
if (mmio_base != NULL) {
* Request IRQ for legacy or MSI interrupts, or for first
* MSI-X vector.
*/
- error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
+ error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+ vmci_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
pci_irq_vector(pdev, 0), error);
* between the vectors.
*/
if (vmci_dev->exclusive_vectors) {
- error = request_irq(pci_irq_vector(pdev, 1),
- vmci_interrupt_bm, 0, KBUILD_MODNAME,
- vmci_dev);
+ error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
+ vmci_interrupt_bm, 0,
+ KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
goto err_free_irq;
}
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
- error = request_irq(pci_irq_vector(pdev, 2),
- vmci_interrupt_dma_datagram,
- 0, KBUILD_MODNAME, vmci_dev);
+ error = request_threaded_irq(pci_irq_vector(pdev, 2),
+ NULL,
+ vmci_interrupt_dma_datagram,
+ 0, KBUILD_MODNAME,
+ vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
err_free_irq:
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
- tasklet_kill(&vmci_dev->datagram_tasklet);
- tasklet_kill(&vmci_dev->bm_tasklet);
err_disable_msi:
pci_free_irq_vectors(pdev);
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
pci_free_irq_vectors(pdev);
- tasklet_kill(&vmci_dev->datagram_tasklet);
- tasklet_kill(&vmci_dev->bm_tasklet);
-
if (vmci_dev->notification_bitmap) {
/*
* The device reset above cleared the bitmap state of the
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
+#define ESDHC_TUNING_STEP_DEFAULT 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct cqhci_host *cq_host = host->mmc->cqe_private;
- int tmp;
+ u32 tmp;
if (esdhc_is_usdhc(imx_data)) {
/*
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
- tmp |= ESDHC_STD_TUNING_EN |
- ESDHC_TUNING_START_TAP_DEFAULT;
- if (imx_data->boarddata.tuning_start_tap) {
- tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+ tmp |= ESDHC_STD_TUNING_EN;
+
+ /*
+ * ROM code or the bootloader may have configured the start tap
+ * and step; clear both fields before setting them below.
+ */
+ tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+ if (imx_data->boarddata.tuning_start_tap)
tmp |= imx_data->boarddata.tuning_start_tap;
- }
+ else
+ tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_step) {
- tmp &= ~ESDHC_TUNING_STEP_MASK;
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
+ } else {
+ tmp |= ESDHC_TUNING_STEP_DEFAULT
+ << ESDHC_TUNING_STEP_SHIFT;
}
/* Disable the CMD CRC check for tuning, if not, need to
struct sunxi_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
- pm_runtime_force_suspend(&pdev->dev);
- disable_irq(host->irq);
- sunxi_mmc_disable(host);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev)) {
+ disable_irq(host->irq);
+ sunxi_mmc_disable(host);
+ }
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
mmc_free_host(mmc);
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
select MEDIATEK_GE_PHY
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. Multi-chip module MT7530 in MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs is supported.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
},
{
.compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
(port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
- rxb->offload_fwd_mark = 1;
+ rxb->offload_fwd_mark = port_priv->priv->forwarding;
netif_rx(rxb);
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
/* ring full, shall not happen because queue is stopped if full
* below
*/
- netif_stop_queue(tx->adapter->netdev);
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
return NETDEV_TX_BUSY;
}
if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
/* ring can get full with next frame */
- netif_stop_queue(tx->adapter->netdev);
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
}
return NETDEV_TX_OK;
} while (likely(budget));
if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
- netif_queue_stopped(tx->adapter->netdev)) {
- netif_wake_queue(tx->adapter->netdev);
+ netif_tx_queue_stopped(nq)) {
+ netif_tx_wake_queue(nq);
}
__netif_tx_unlock(nq);
cleaned = qman_p_poll_dqrr(np->p, budget);
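+ /* xdp_do_flush() must be called before napi_complete_done() below */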
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
/* board specific private data structure */
struct iavf_adapter {
+ struct workqueue_struct *wq;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
static inline const char *iavf_state_str(enum iavf_state_t state)
{
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
return 0;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
return err;
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
mutex_unlock(&adapter->crit_lock);
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
int iavf_status_to_errno(enum iavf_status status)
{
if (!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
if (adapter->state != __IAVF_REMOVE)
/* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
/* schedule the watchdog task to immediately process the request */
if (f) {
- queue_work(iavf_wq, &adapter->watchdog_task.work);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
return 0;
}
return -ENOMEM;
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
if (aq_required) {
adapter->aq_required |= aq_required;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
}
goto restart_watchdog;
}
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+ adapter->netdev_registered &&
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+ rtnl_trylock()) {
+ netdev_update_features(adapter->netdev);
+ rtnl_unlock();
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
return;
}
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
return;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
if (adapter->state >= __IAVF_DOWN)
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
}
/**
*/
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
goto reset_finish;
}
bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
* state here.
if (adapter->state == __IAVF_REMOVE)
return;
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
goto out;
}
} while (pending);
mutex_unlock(&adapter->crit_lock);
- if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
- if (adapter->netdev_registered ||
- !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
- struct net_device *netdev = adapter->netdev;
-
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features
- (adapter, 0, netdev->features);
-
- iavf_set_queue_vlan_tag_loc(adapter);
- }
-
- adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
- }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
return 0;
hw = &adapter->hw;
hw->back = adapter;
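+ /*
+ * Use a per-adapter ordered workqueue so that work items of one
+ * adapter cannot stall or deadlock work queued by another adapter.
+ */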
+ adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ iavf_driver_name);
+ if (!adapter->wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
iavf_change_state(adapter, __IAVF_STARTUP);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
return 0;
err_ioremap:
+ destroy_workqueue(adapter->wq);
+err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
return err;
}
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
netif_device_attach(adapter->netdev);
}
spin_unlock_bh(&adapter->adv_rss_lock);
+ destroy_workqueue(adapter->wq);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
- iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
- iavf_driver_name);
- if (!iavf_wq) {
- pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
- return -ENOMEM;
- }
-
- ret = pci_register_driver(&iavf_driver);
- if (ret)
- destroy_workqueue(iavf_wq);
-
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
static void __exit iavf_exit_module(void)
{
pci_unregister_driver(&iavf_driver);
- destroy_workqueue(iavf_wq);
}
module_exit(iavf_exit_module);
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
break;
default:
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
adapter->hw.mac.addr);
}
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
}
/**
- * ice_register_netdev - register netdev and devlink port
+ * ice_register_netdev - register netdev
* @pf: pointer to the PF struct
*/
static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
return 0;
err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ struct ice_vsi *vsi;
struct ice_pf *pf;
struct ice_hw *hw;
int i, err;
pcie_print_link_status(pf->pdev);
probe_done:
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_create_pf_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi || !vsi->netdev) {
+ err = -EINVAL;
+ goto err_netdev_reg;
+ }
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
err = ice_register_netdev(pf);
if (err)
goto err_netdev_reg;
err_devlink_reg_param:
ice_devlink_unregister_params(pf);
err_netdev_reg:
+ ice_devlink_destroy_pf_port(pf);
+err_create_pf_port:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
mutex_destroy(&hw->fdir_fltr_lock);
+ ice_devlink_destroy_pf_port(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
lan966x_fdma_rx_reload(rx);
}
- if (counter < weight && napi_complete_done(napi, counter))
- lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
-
if (redirect)
xdp_do_flush();
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
return counter;
}
gic->handler = NULL;
gic->arg = NULL;
+ if (!i)
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+ pci_name(pdev));
+ else
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
goto free_mask;
}
- err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
goto free_mask;
irq_set_affinity_and_hint(irq, req_mask);
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
+
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush();
+
/* Handle case where we are called by netpoll with a budget of 0 */
if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
qede_update_tx_producer(fp->xdp_tx);
}
- if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
- xdp_do_flush_map();
-
return rx_work_done;
}
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
- /* Receive Descriptor Empty int */
+ /* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;
else
ret = ravb_close(ndev);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return ret;
}
/* Restore descriptor base address table */
ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
+
if (netif_running(ndev)) {
if (priv->wol_enabled) {
ret = ravb_wol_restore(ndev);
port = NULL;
goto out;
}
- if (index == rdev->etha->index)
+ if (index == rdev->etha->index) {
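+ /* a port disabled in the device tree is treated as absent */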
+ if (!of_device_is_available(port))
+ port = NULL;
break;
+ }
}
out:
port = rswitch_get_port_node(rdev);
if (!port)
- return -ENODEV;
+ return 0; /* ignored */
err = of_get_phy_mode(port, &rdev->etha->phy_interface);
of_node_put(port);
{
int i, err;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = rswitch_ether_port_init_one(priv->rdev[i]);
if (err)
goto err_init_one;
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = rswitch_serdes_init(priv->rdev[i]);
if (err)
goto err_serdes;
return 0;
err_serdes:
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
rswitch_serdes_deinit(priv->rdev[i]);
i = RSWITCH_NUM_PORTS;
err_init_one:
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
rswitch_ether_port_deinit_one(priv->rdev[i]);
return err;
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
port = rswitch_get_port_node(rdev);
+ rdev->disabled = !port;
err = of_get_ethdev_address(port, ndev);
of_node_put(port);
if (err) {
if (err)
goto err_ether_port_init_all;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = register_netdev(priv->rdev[i]->ndev);
if (err) {
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
unregister_netdev(priv->rdev[i]->ndev);
goto err_register_netdev;
}
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_for_each_enabled_port(priv, i)
netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
priv->rdev[i]->ndev->dev_addr);
#define RSWITCH_MAX_NUM_QUEUES 128
#define RSWITCH_NUM_PORTS 3
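+/*
+ * Iterate over the switch ports, skipping any port that is disabled in
+ * the device tree. The if/else construct makes the loop body attach to
+ * the else branch, so the macros can be used like an ordinary for loop.
+ */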
+#define rswitch_for_each_enabled_port(priv, i) \
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
+ if (priv->rdev[i]->disabled) \
+ continue; \
+ else
+
+#define rswitch_for_each_enabled_port_continue_reverse(priv, i) \
+ for (i--; i >= 0; i--) \
+ if (priv->rdev[i]->disabled) \
+ continue; \
+ else
#define TX_RING_SIZE 1024
#define RX_RING_SIZE 1024
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
u8 ts_tag;
+ bool disabled;
int port;
struct rswitch_etha *etha;
*/
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
{
+ u32 value;
int ret;
/* Enable the phy clock */
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
- PHY_CNTL1_CLK_EN |
- PHY_CNTL1_CLKFREQ |
- PHY_CNTL1_PHY_ENB,
- priv->regs + ETH_PHY_CNTL1);
+
+ /* Make sure we get a 0 -> 1 transition on the enable bit */
+ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+ PHY_CNTL1_CLK_EN |
+ PHY_CNTL1_CLKFREQ;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
writel(PHY_CNTL2_USE_INTERNAL |
PHY_CNTL2_SMI_SRC_MAC |
PHY_CNTL2_RX_CLK_EPHY,
priv->regs + ETH_PHY_CNTL2);
+ value |= PHY_CNTL1_PHY_ENB;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+
return 0;
}
received = virtnet_receive(rq, budget, &xdp_xmit);
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush();
+
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush();
-
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
apple_nvme_remove_cq(anv);
}
- nvme_disable_ctrl(&anv->ctrl, shutdown);
+ /*
+ * Always disable the NVMe controller after shutdown.
+ * We need to do this to bring it back up later anyway, and we
+ * can't do it while the firmware is not running (e.g. in the
+ * resume reset path before RTKit is initialized), so for Apple
+ * controllers it makes sense to unconditionally do it here.
+ * Additionally, this sequence of events is reliable, while
+ * others (like disabling after bringing back the firmware on
+ * resume) seem to run into trouble under some circumstances.
+ *
+ * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+ * NVMe controller is handed off with firmware shut down, in an
+ * NVMe disabled state, after a clean shutdown).
+ */
+ if (shutdown)
+ nvme_disable_ctrl(&anv->ctrl, shutdown);
+ nvme_disable_ctrl(&anv->ctrl, false);
}
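Condensed, the effective disable sequence is as follows, assuming the usual nvme_disable_ctrl() semantics (shutdown == true performs an NVMe shutdown via CC.SHN, shutdown == false clears CC.EN); this is a sketch, not additional driver code:

	/* shutdown == true : NVMe shutdown (CC.SHN), then disable (CC.EN = 0)
	 * shutdown == false: disable only (CC.EN = 0)
	 * Either way the controller is handed off in an NVMe-disabled state.
	 */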
WRITE_ONCE(anv->ioq.enabled, false);
goto out;
}
- if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
- apple_nvme_disable(anv, false);
-
/* RTKit must be shut down cleanly for the (soft)-reset to work */
if (apple_rtkit_is_running(anv->rtk)) {
+ /* reset the controller if it is enabled */
+ if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+ apple_nvme_disable(anv, false);
dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
ret = apple_rtkit_shutdown(anv->rtk);
if (ret)
else
nvme_poll_irqdisable(nvmeq);
- if (blk_mq_request_completed(req)) {
+ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
imx8_phy->perst =
devm_reset_control_get_exclusive(dev, "perst");
if (IS_ERR(imx8_phy->perst))
- dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
+ return dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
"Failed to get PCIE PHY PERST control\n");
}
struct gpio_desc *standby_gpio;
struct gpio_desc *enable_gpio;
u32 max_bitrate = 0;
+ int err;
can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
if (!can_transceiver_phy)
return PTR_ERR(phy);
}
- device_property_read_u32(dev, "max-bitrate", &max_bitrate);
- if (!max_bitrate)
+ err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+ if ((err != -EINVAL) && !max_bitrate)
dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
phy->attrs.max_link_rate = max_bitrate;
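The distinction drawn here is absent versus invalid: device_property_read_u32() returns -EINVAL when the property does not exist at all, so only a property that is present but unusable now triggers the warning. The resulting cases, sketched:

	/* err == -EINVAL               : "max-bitrate" absent -> no warning, no limit
	 * err == 0 && max_bitrate == 0 : present but zero     -> warn, ignore limit
	 * err == 0 && max_bitrate != 0 : valid                -> apply the limit
	 * other err (max_bitrate == 0) : unreadable value     -> warn, ignore limit
	 */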
HSPHY_INIT_CFG(0x90, 0x60, 0),
};
-static const struct hsphy_init_seq init_seq_mdm9607[] = {
- HSPHY_INIT_CFG(0x80, 0x44, 0),
- HSPHY_INIT_CFG(0x81, 0x38, 0),
- HSPHY_INIT_CFG(0x82, 0x24, 0),
- HSPHY_INIT_CFG(0x83, 0x13, 0),
-};
-
static const struct hsphy_data hsphy_data_femtophy = {
.init_seq = init_seq_femtophy,
.init_seq_num = ARRAY_SIZE(init_seq_femtophy),
};
-static const struct hsphy_data hsphy_data_mdm9607 = {
- .init_seq = init_seq_mdm9607,
- .init_seq_num = ARRAY_SIZE(init_seq_mdm9607),
-};
-
static const struct of_device_id qcom_snps_hsphy_match[] = {
{ .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, },
- { .compatible = "qcom,usb-hs-28nm-mdm9607", .data = &hsphy_data_mdm9607, },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007);
r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
- r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x380, 0x0101);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0101);
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0);
if (ret)
return ret;
return ret;
ret = property_enable(base, &rport->port_cfg->phy_sus, false);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rphy->clk480m);
return ret;
+ }
/* waiting for the utmi_clk to become stable */
usleep_range(1500, 2000);
return PTR_ERR(usbphy->phy_regs);
usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
+ if (!usbphy->moon4_res_mem)
+ return -EINVAL;
+
usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
resource_size(usbphy->moon4_res_mem));
if (!usbphy->moon4_regs)
config PHY_AM654_SERDES
tristate "TI AM654 SERDES support"
- depends on OF && ARCH_K3 || COMPILE_TEST
+ depends on OF && (ARCH_K3 || COMPILE_TEST)
depends on COMMON_CLK
select GENERIC_PHY
select MULTIPLEXER
config PHY_J721E_WIZ
tristate "TI J721E WIZ (SERDES Wrapper) support"
- depends on OF && ARCH_K3 || COMPILE_TEST
+ depends on OF && (ARCH_K3 || COMPILE_TEST)
depends on HAS_IOMEM && OF_ADDRESS
depends on COMMON_CLK
select GENERIC_PHY
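Both hunks fix the same precedence bug: Kconfig expressions follow C-style precedence, where && binds tighter than ||, so the unparenthesized form allowed COMPILE_TEST builds with OF disabled. In C notation, which shares the precedence rules:

	/* OF && ARCH_K3 || COMPILE_TEST    parses as  (OF && ARCH_K3) || COMPILE_TEST */
	/* OF && (ARCH_K3 || COMPILE_TEST)  requires OF in either configuration        */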
*/
#include <linux/kernel.h>
-#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinctrl.h>
+
#include <linux/mfd/abx500/ab8500.h>
+
#include "pinctrl-abx500.h"
/* All the pins that can be used for GPIO and some other functions */
*/
#include <linux/kernel.h>
-#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinctrl.h>
+
#include <linux/mfd/abx500/ab8500.h>
+
#include "pinctrl-abx500.h"
/* All the pins that can be used for GPIO and some other functions */
*
* Driver allows to use AxB5xx unused pins to be used as GPIO
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/pinctrl/pinctrl.h>
+
#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
-#include "pinctrl-abx500.h"
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
+#include "pinctrl-abx500.h"
+
/*
* GPIO registers offset
* Bank: 0x10
#ifndef PINCTRL_PINCTRL_ABx500_H
#define PINCTRL_PINCTRL_ABx500_H
+#include <linux/types.h>
+
+struct pinctrl_pin_desc;
+
/* Package definitions */
#define PINCTRL_AB8500 0
#define PINCTRL_AB8505 1
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/pinctrl.h>
+
#include "pinctrl-nomadik.h"
/* All the pins that can be used for GPIO and some other functions */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/pinctrl.h>
+
#include "pinctrl-nomadik.h"
/* All the pins that can be used for GPIO and some other functions */
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
* Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/of_address.h>
-#include <linux/bitops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* Since we request GPIOs from ourself */
+#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-/* Since we request GPIOs from ourself */
-#include <linux/pinctrl/consumer.h>
-#include "pinctrl-nomadik.h"
+
#include "../core.h"
#include "../pinctrl-utils.h"
+#include "pinctrl-nomadik.h"
+
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
* AMBA device, managing 32 pins and alternate functions. The logic block
return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
}
-#include <linux/seq_file.h>
-
static void nmk_gpio_dbg_show_one(struct seq_file *s,
struct pinctrl_dev *pctldev, struct gpio_chip *chip,
unsigned offset, unsigned gpio)
#ifndef PINCTRL_PINCTRL_NOMADIK_H
#define PINCTRL_PINCTRL_NOMADIK_H
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
/* Package definitions */
#define PINCTRL_NMK_STN8815 0
#define PINCTRL_NMK_DB8500 1
RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
- RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
- RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+ RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
- RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
- RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+ RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
- RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+ RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
- RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
- RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
- RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+ RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
};
case RK3308:
case RK3368:
case RK3399:
+ case RK3568:
case RK3588:
pull_type = bank->pull_type[pin_num / 8];
data >>= bit;
data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
+ /*
+	 * In the TRM, the pull-up value is 1 for everything except GPIO0_D3-D6,
+	 * where the pull-up value becomes 3.
+ */
+ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+ if (data == 3)
+ data = 1;
+ }
return rockchip_pull_list[pull_type][data];
default:
}
}
/*
- * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
+ * In the TRM, the pull-up value is 1 for everything except GPIO0_D3-D6,
* where that pull up value becomes 3.
*/
if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
return 0;
}
-#ifdef CONFIG_DEBUG_FS
static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
const char *label;
seq_puts(s, "\n");
}
}
-#endif
static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pctl)
{
gchip->get = sppctl_gpio_get;
gchip->set = sppctl_gpio_set;
gchip->set_config = sppctl_gpio_set_config;
-#ifdef CONFIG_DEBUG_FS
- gchip->dbg_show = sppctl_gpio_dbg_show;
-#endif
+ gchip->dbg_show = IS_ENABLED(CONFIG_DEBUG_FS) ?
+ sppctl_gpio_dbg_show : NULL;
gchip->base = -1;
gchip->ngpio = sppctl_gpio_list_sz;
gchip->names = sppctl_gpio_list_s;
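IS_ENABLED(CONFIG_DEBUG_FS) folds to 0 or 1 at compile time, so the ternary constant-folds to NULL when debugfs is off while sppctl_gpio_dbg_show() is still compiled and type-checked in every configuration, which the removed #ifdef did not provide. A sketch of the effect:

	/* CONFIG_DEBUG_FS=y: gchip->dbg_show = sppctl_gpio_dbg_show;
	 * CONFIG_DEBUG_FS=n: gchip->dbg_show = NULL; the never-taken branch
	 * and the callback body are then discarded by the optimizer.
	 */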
config RESET_TI_SCI
tristate "TI System Control Interface (TI-SCI) reset driver"
- depends on TI_SCI_PROTOCOL || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
help
This enables the reset driver support over TI System Control Interface
available on some new TI's SoCs. If you wish to use reset resources
struct device *dev = &pdev->dev;
struct uniphier_glue_reset_priv *priv;
struct resource *res;
- resource_size_t size;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- size = resource_size(res);
priv->rdata.membase = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->rdata.membase))
return PTR_ERR(priv->rdata.membase);
spin_lock_init(&priv->rdata.lock);
priv->rdata.rcdev.owner = THIS_MODULE;
- priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
+ priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
priv->rdata.rcdev.ops = &reset_simple_ops;
priv->rdata.rcdev.of_node = dev->of_node;
priv->rdata.active_low = true;
*
* Returns true if and only if alua_rtpg_work() will be called asynchronously.
* That function is responsible for calling @qdata->fn().
+ *
+ * Context: may be called from atomic context (alua_check()) only if the caller
+ * holds an sdev reference.
*/
static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
int start_queue = 0;
unsigned long flags;
- might_sleep();
-
if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false;
{
struct Scsi_Host *sh;
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
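The bug being fixed is the classic sizeof-on-a-pointer mistake: h is a struct ctlr_info *, so sizeof(h) reserves only pointer-sized private data. A minimal illustration (variable names hypothetical):

	struct ctlr_info *h;
	size_t too_small = sizeof(h);                /* pointer size, 8 on 64-bit */
	size_t intended  = sizeof(struct ctlr_info); /* full per-controller state */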
enum iscsi_host_param param, char *buf)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
- struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_session *session;
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
+ session = tcp_sw_host->session;
if (!session)
return -ENOTCONN;
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
- tcp_sw_host = iscsi_host_priv(shost);
- tcp_sw_host->session = session;
if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
+
+	/* We are now fully set up, so expose the session to sysfs. */
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
return cls_session;
remove_session:
if (WARN_ON_ONCE(session->leadconn))
return;
+ iscsi_session_remove(cls_session);
+ /*
+ * Our get_host_param needs to access the session, so remove the
+ * host from sysfs before freeing the session to make sure userspace
+ * is no longer accessing the callout.
+ */
+ iscsi_host_remove(shost, false);
+
iscsi_tcp_r2tpool_free(cls_session->dd_data);
- iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost, false);
+ iscsi_session_free(cls_session);
iscsi_host_free(shost);
}
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
-/**
- * iscsi_session_teardown - destroy session, host, and cls_session
- * @cls_session: iscsi session
+/*
+ * iscsi_session_remove - Remove session from iSCSI class.
*/
-void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+void iscsi_session_remove(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
- struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
iscsi_remove_session(cls_session);
+ /*
+ * host removal only has to wait for its children to be removed from
+ * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
+ * the session, so drop the session count here.
+ */
+ iscsi_host_dec_session_cnt(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_remove);
+
+/**
+ * iscsi_session_free - Free an iSCSI session and its resources
+ * @cls_session: iscsi session
+ */
+void iscsi_session_free(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct module *owner = cls_session->transport->owner;
iscsi_pool_free(&session->cmdpool);
kfree(session->password);
kfree(session->discovery_parent_type);
iscsi_free_session(cls_session);
-
- iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
+EXPORT_SYMBOL_GPL(iscsi_session_free);
+
+/**
+ * iscsi_session_teardown - destroy session and cls_session
+ * @cls_session: iscsi session
+ */
+void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_remove(cls_session);
+ iscsi_session_free(cls_session);
+}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
/**
break;
case IMX8MP_HDMIBLK_PD_LCDIF:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(16) | BIT(17) | BIT(18) |
BIT(19) | BIT(20));
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
break;
case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
BIT(4) | BIT(5) | BIT(6));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(16) | BIT(17) | BIT(18) |
BIT(19) | BIT(20));
break;
case IMX8MP_HDMIBLK_PD_PAI:
case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
break;
case IMX8MP_HDMIBLK_PD_HDCP:
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
clk = of_clk_get_by_name(np, NULL);
- if (!clk) {
- WARN_ON(!clk);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
return 0;
}
goto out;
}
+	/* The protection domain is optional; it does not exist on older platforms */
ret = of_property_read_string_index(np, "qcom,protection-domain",
1, &adev->service_path);
- if (ret < 0) {
+ if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
goto out;
}
ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
if (ret)
- return ret;
+ goto err_remove_genpd;
platform_set_drvdata(pdev, drv);
cpr_debugfs_init(drv);
return 0;
+
+err_remove_genpd:
+ pm_genpd_remove(&drv->pd);
+ return ret;
}
static int cpr_remove(struct platform_device *pdev)
struct vchiq_instance;
-extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern int vchiq_initialise(struct vchiq_instance **pinstance);
extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
extern void
vchiq_dump_service_use_state(struct vchiq_state *state);
-extern enum vchiq_status
+extern int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
-extern enum vchiq_status
+extern int
vchiq_release_internal(struct vchiq_state *state,
struct vchiq_service *service);
{
struct se_session *sess = se_cmd->se_sess;
- assert_spin_locked(&sess->sess_cmd_lock);
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_held(&sess->sess_cmd_lock);
+
/*
* If command already reached CMD_T_COMPLETE state within
* target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
cdev->devdata = devdata;
ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
- if (ret)
- goto out_kfree_type;
+ if (ret) {
+ kfree(cdev->type);
+ goto out_ida_remove;
+ }
thermal_cooling_device_setup_sysfs(cdev);
+
ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
if (ret) {
+ kfree(cdev->type);
thermal_cooling_device_destroy_sysfs(cdev);
- goto out_kfree_type;
+ goto out_ida_remove;
}
+
ret = device_register(&cdev->device);
if (ret)
goto out_kfree_type;
thermal_cooling_device_destroy_sysfs(cdev);
kfree(cdev->type);
put_device(&cdev->device);
+
+ /* thermal_release() takes care of the rest */
cdev = NULL;
out_ida_remove:
ida_free(&thermal_cdev_ida, id);
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
- struct usb4_port *usb4;
-
- usb4 = port->usb4;
- if (!usb4)
- return 0;
-
- pm_runtime_get_sync(&usb4->dev);
/*
* Send broadcast RT to make sure retimer indices facing this
*/
ret = usb4_port_enumerate_retimers(port);
if (ret)
- goto out;
+ return ret;
/*
* Enable sideband channel for each retimer. We can do this
break;
}
- if (!last_idx) {
- ret = 0;
- goto out;
- }
+ if (!last_idx)
+ return 0;
/* Add on-board retimers if they do not exist already */
+ ret = 0;
for (i = 1; i <= last_idx; i++) {
struct tb_retimer *rt;
}
}
-out:
- pm_runtime_mark_last_busy(&usb4->dev);
- pm_runtime_put_autosuspend(&usb4->dev);
-
return ret;
}
* Downstream switch is reachable through two ports.
* Only scan on the primary port (link_nr == 0).
*/
+
+ if (port->usb4)
+ pm_runtime_get_sync(&port->usb4->dev);
+
if (tb_wait_for_port(port, false) <= 0)
- return;
+ goto out_rpm_put;
if (port->remote) {
tb_port_dbg(port, "port already has a remote\n");
- return;
+ goto out_rpm_put;
}
tb_retimer_scan(port, true);
*/
if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
tb_scan_xdomain(port);
- return;
+ goto out_rpm_put;
}
if (tb_switch_configure(sw)) {
tb_switch_put(sw);
- return;
+ goto out_rpm_put;
}
/*
if (tb_switch_add(sw)) {
tb_switch_put(sw);
- return;
+ goto out_rpm_put;
}
/* Link the switches using both links if available */
tb_add_dp_resources(sw);
tb_scan_switch(sw);
+
+out_rpm_put:
+ if (port->usb4) {
+ pm_runtime_mark_last_busy(&port->usb4->dev);
+ pm_runtime_put_autosuspend(&port->usb4->dev);
+ }
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
return;
} else if (!ret) {
/* Use maximum link rate if the link valid is not set */
- ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+ ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;
* registered, we notify the userspace that it has changed.
*/
if (!update) {
- struct tb_port *port;
+ /*
+ * Now disable lane 1 if bonding was not enabled. Do
+ * this only if bonding was possible at the beginning
+ * (that is we are the connection manager and there are
+		 * (that is, we are the connection manager and there are
+ */
+ if (xd->bonding_possible) {
+ struct tb_port *port;
- /* Now disable lane 1 if bonding was not enabled */
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
- if (!port->bonded)
- tb_port_disable(port->dual_link_port);
+ port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ if (!port->bonded)
+ tb_port_disable(port->dual_link_port);
+ }
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+#define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001
+#define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002
+#define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004
+#define PCI_DEVICE_ID_SEALEVEL_780xC 0x1008
+#define PCI_DEVICE_ID_SEALEVEL_716xC 0x1010
+
#define UART_EXAR_INT0 0x80
#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
else if (board->num_ports)
nr_ports = board->num_ports;
+ else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
+ nr_ports = pcidev->device & 0xff;
else
nr_ports = pcidev->device & 0x0f;
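For the Sealevel entries added below, the low byte of the PCI device ID encodes the port count, which is exactly what the new branch extracts. Worked examples using the IDs defined above:

	/* 0x1001 & 0xff = 1 (710xC)    0x1002 & 0xff = 2  (720xC)
	 * 0x1004 & 0xff = 4 (740xC)    0x1008 & 0xff = 8  (780xC)
	 * 0x1010 & 0xff = 16 (716xC)
	 */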
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
+
+ EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
+ EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
+ EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
+ EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
+ EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ !uap->rs485_tx_started)
+ pl011_rs485_tx_start(uap);
+
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
return false;
}
- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
- pl011_rs485_tx_start(uap);
-
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
return true;
else if (mr == ATMEL_US_PAR_ODD)
*parity = 'o';
- /*
- * The serial core only rounds down when matching this to a
- * supported baud rate. Make sure we don't end up slightly
- * lower than one of those, as it would make us fall through
- * to a much lower baud rate than we really want.
- */
- *baud = port->uartclk / (16 * (quot - 1));
+ *baud = port->uartclk / (16 * quot);
}
static int __init atmel_console_setup(struct console *co, char *options)
uart_xmit_advance(port, sg_dma_len(sg));
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
priv->orig_nent = 0;
return IRQ_HANDLED;
}
-static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+static int setup_fifos(struct qcom_geni_serial_port *port)
{
struct uart_port *uport;
+ u32 old_rx_fifo_depth = port->rx_fifo_depth;
uport = &port->uport;
port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
uport->fifosize =
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+
+ if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
+ port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
+ port->rx_fifo_depth * sizeof(u32),
+ GFP_KERNEL);
+ if (!port->rx_fifo)
+ return -ENOMEM;
+ }
+
+ return 0;
}
u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
u32 pin_swap;
+ int ret;
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
qcom_geni_serial_stop_rx(uport);
- get_tx_fifo_size(port);
+ ret = setup_fifos(port);
+ if (ret)
+ return ret;
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
return 0;
}
-static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
+static int qcom_geni_serial_sys_suspend(struct device *dev)
{
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
return uart_suspend_port(private_data->drv, uport);
}
-static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
+static int qcom_geni_serial_sys_resume(struct device *dev)
{
int ret;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
}
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
- qcom_geni_serial_sys_resume)
- .restore = qcom_geni_serial_sys_hib_resume,
- .thaw = qcom_geni_serial_sys_hib_resume,
+ .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+ .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
+ .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+ .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+ .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
+ .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
};
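pm_sleep_ptr() expands to the callback when CONFIG_PM_SLEEP is enabled and to NULL otherwise (it is PTR_IF() over IS_ENABLED(CONFIG_PM_SLEEP)), which is why the __maybe_unused annotations above could be dropped. A sketch of the effect:

	/* CONFIG_PM_SLEEP=y: .suspend = qcom_geni_serial_sys_suspend
	 * CONFIG_PM_SLEEP=n: .suspend = NULL, and the now-unreferenced
	 * callback is eliminated instead of triggering an unused warning.
	 */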
static const struct of_device_id qcom_geni_serial_match_table[] = {
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
- if (writelock)
- up_write(&hba->clk_scaling_lock);
- else
- up_read(&hba->clk_scaling_lock);
+ up_write(&hba->clk_scaling_lock);
+
+ /* Enable Write Booster if we have scaled up else disable it */
+ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
+ ufshcd_wb_toggle(hba, scale_up);
+
+ mutex_unlock(&hba->wb_mutex);
+
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
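Taken together, the prepare/unprepare pair establishes a fixed lock order: wb_mutex is acquired before clk_scaling_lock and released after it, so the WriteBooster toggle runs without the rwsem held yet still serialized against scaling. A condensed restatement of the two hunks (sketch, not additional code):

	mutex_lock(&hba->wb_mutex);           /* outer: serializes WB toggling */
	down_write(&hba->clk_scaling_lock);   /* inner: fences request issue   */
	/* scale clocks and gear */
	up_write(&hba->clk_scaling_lock);
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);  /* only wb_mutex held here */
	mutex_unlock(&hba->wb_mutex);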
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
}
}
- /* Enable Write Booster if we have scaled up else disable it */
- if (ufshcd_enable_wb_if_scaling_up(hba)) {
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
- }
-
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, is_writelock);
+ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
return ret;
}
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
hba->clk_scaling.is_allowed = allow;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
/* Initialize mutex for exception event control */
mutex_init(&hba->ee_ctrl_mutex);
+ mutex_init(&hba->wb_mutex);
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
u8 req_on_hw_ring = 0;
unsigned long flags;
int ret = 0;
+ int val;
if (!ep || !request || !ep->desc)
return -EINVAL;
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring && link_trb) {
+ /* Stop DMA */
+ writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
+
+ /* wait for DFLUSH cleared */
+ readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & EP_CMD_DFLUSH), 1, 1000);
+
link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE)));
link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+ req = cdns3_next_request(&priv_ep->pending_req_list);
+ if (req)
+ cdns3_rearm_transfer(priv_ep, 1);
+
not_found:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
cable_id = &ci->platdata->id_extcon;
cable_vbus = &ci->platdata->vbus_extcon;
- if ((!IS_ERR(cable_id->edev) || !IS_ERR(ci->role_switch))
+ if ((!IS_ERR(cable_id->edev) || ci->role_switch)
&& ci->is_otg &&
(otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
ci_irq(ci);
- if ((!IS_ERR(cable_vbus->edev) || !IS_ERR(ci->role_switch))
+ if ((!IS_ERR(cable_vbus->edev) || ci->role_switch)
&& ci->is_otg &&
(otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
ci_irq(ci);
#define USB_PRODUCT_USB5534B 0x5534
#define USB_VENDOR_CYPRESS 0x04b4
#define USB_PRODUCT_CY7C65632 0x6570
+#define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451
+#define USB_PRODUCT_TUSB8041_USB3 0x8140
+#define USB_PRODUCT_TUSB8041_USB2 0x8142
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+ .idProduct = USB_PRODUCT_TUSB8041_USB2,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+ .idProduct = USB_PRODUCT_TUSB8041_USB3,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
+#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
+
+/**
+ * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
+ * @hdev: USB device belonging to the usb hub
+ * @index: zero based port index
+ *
+ * Some USB3 ports may not support USB3 link power management U1/U2 states
+ * due to different retimer setup. ACPI provides _DSM method which returns 0x01
+ * if U1 and U2 states should be disabled. Evaluate _DSM with:
+ * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
+ * Arg1: Revision ID = 0
+ * Arg2: Function Index = 5
+ * Arg3: (empty)
+ *
+ * Return 1 if the USB3 port is LPM incapable, negative on error, otherwise 0
+ */
+
+int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+{
+ union acpi_object *obj;
+ acpi_handle port_handle;
+ int port1 = index + 1;
+ guid_t guid;
+ int ret;
+
+ ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
+ if (ret)
+ return ret;
+
+ port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
+ if (!port_handle) {
+ dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
+ return -ENODEV;
+ }
+
+ if (!acpi_check_dsm(port_handle, &guid, 0,
+ BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
+ dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
+ port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
+ return -ENODEV;
+ }
+
+ obj = acpi_evaluate_dsm(port_handle, &guid, 0,
+ USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
+
+ if (!obj)
+ return -ENODEV;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
+ ACPI_FREE(obj);
+ return -EINVAL;
+ }
+
+ if (obj->integer.value == 0x01)
+ ret = 1;
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
+
/**
* usb_acpi_set_power_state - control usb port's power via acpi power
* resource
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
+ depends on (EXTCON || EXTCON=n)
select USB_XHCI_PLATFORM if USB_XHCI_HCD
select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
help
config USB_DWC3_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
- depends on (EXTCON=y || EXTCON=USB_DWC3)
help
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
WARN_ON(!list_empty(&gi->string_list));
WARN_ON(!list_empty(&gi->available_func));
kfree(gi->composite.gadget_driver.function);
+ kfree(gi->composite.gadget_driver.driver.name);
kfree(gi);
}
.max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
- .name = "configfs-gadget",
},
.match_existing_only = 1,
};
gi->composite.gadget_driver = configfs_driver_template;
+ gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
+ "configfs-gadget.%s", name);
+ if (!gi->composite.gadget_driver.driver.name)
+ goto err;
+
gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
gi->composite.name = gi->composite.gadget_driver.function;
if (!gi->composite.gadget_driver.function)
- goto err;
+ goto out_free_driver_name;
return &gi->group;
+
+out_free_driver_name:
+ kfree(gi->composite.gadget_driver.driver.name);
err:
kfree(gi);
return ERR_PTR(-ENOMEM);
struct usb_request *req = ffs->ep0req;
int ret;
+ if (!req)
+ return -EINVAL;
+
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
spin_unlock_irq(&ffs->ev.waitq.lock);
ENTER();
if (!WARN_ON(!ffs->gadget)) {
+ /* dequeue before freeing ep0req */
+ usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
+ mutex_lock(&ffs->mutex);
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ mutex_unlock(&ffs->mutex);
ffs_data_put(ffs);
}
}
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ if (!g)
+ return 0;
+ else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
return 4250000000U;
else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
return 3750000000U;
*/
static const char *CHIP;
+static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
/*----------------------------------------------------------------------*/
{
struct inode *inode;
struct dev_data *dev;
+ int rc;
- if (the_device)
- return -ESRCH;
+ mutex_lock(&sb_mutex);
+
+ if (the_device) {
+ rc = -ESRCH;
+ goto Done;
+ }
CHIP = usb_get_gadget_udc_name();
- if (!CHIP)
- return -ENODEV;
+ if (!CHIP) {
+ rc = -ENODEV;
+ goto Done;
+ }
/* superblock */
sb->s_blocksize = PAGE_SIZE;
* from binding to a controller.
*/
the_device = dev;
- return 0;
+ rc = 0;
+ goto Done;
-Enomem:
+ Enomem:
kfree(CHIP);
CHIP = NULL;
+ rc = -ENOMEM;
- return -ENOMEM;
+ Done:
+ mutex_unlock(&sb_mutex);
+ return rc;
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
static void
gadgetfs_kill_sb (struct super_block *sb)
{
+ mutex_lock(&sb_mutex);
kill_litter_super (sb);
if (the_device) {
put_dev (the_device);
}
kfree(CHIP);
CHIP = NULL;
+ mutex_unlock(&sb_mutex);
}
/*----------------------------------------------------------------------*/
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
#include "ehci-fsl.h"
#define DRIVER_DESC "Freescale EHCI Host controller driver"
-#define DRV_NAME "ehci-fsl"
+#define DRV_NAME "fsl-ehci"
static struct hc_driver __read_mostly fsl_ehci_hc_driver;
static struct hc_driver __read_mostly xhci_pci_hc_driver;
static int xhci_pci_setup(struct usb_hcd *hcd);
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.reset = xhci_pci_setup,
+ .update_hub_device = xhci_pci_update_hub_device,
};
/* called after powerup, by probe or system-pm "wakeup" */
NULL);
ACPI_FREE(obj);
}
+
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_hub *rhub = &xhci->usb3_rhub;
+ int ret;
+ int i;
+
+ /* This is not the usb3 roothub we are looking for */
+ if (hcd != rhub->hcd)
+ return;
+
+ if (hdev->maxchild > rhub->num_ports) {
+ dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
+ return;
+ }
+
+ for (i = 0; i < hdev->maxchild; i++) {
+ ret = usb_acpi_port_lpm_incapable(hdev, i);
+
+ dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
+
+ if (ret >= 0) {
+ rhub->ports[i]->lpm_incapable = ret;
+ continue;
+ }
+ }
+}
+
#else
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
#endif /* CONFIG_ACPI */
/* called during probe() after chip reset completes */
return xhci_pci_reinit(xhci, pdev);
}
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags)
+{
+ /* Check if acpi claims some USB3 roothub ports are lpm incapable */
+ if (!hdev->parent)
+ xhci_find_lpm_incapable_ports(hcd, hdev);
+
+ return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
+}
+
/*
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+
return 0;
put_usb3_hcd:
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
- ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
+ unsigned long flags;
int i, ret;
/*
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
+
+ spin_lock_irqsave(&xhci->lock, flags);
xhci_free_virt_device(xhci, udev->slot_id);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
+ struct xhci_port *port;
u16 hub_encoded_timeout;
int mel;
int ret;
if (xhci_check_tier_policy(xhci, udev, state) < 0)
return USB3_LPM_DISABLED;
+ /* If connected to root port then check port can handle lpm */
+ if (udev->parent && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+ if (port->lpm_incapable)
+ return USB3_LPM_DISABLED;
+ }
+
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
-static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
xhci_free_command(xhci, config_cmd);
return ret;
}
+EXPORT_SYMBOL_GPL(xhci_update_hub_device);
static int xhci_get_frame(struct usb_hcd *hcd)
{
drv->check_bandwidth = over->check_bandwidth;
if (over->reset_bandwidth)
drv->reset_bandwidth = over->reset_bandwidth;
+ if (over->update_hub_device)
+ drv->update_hub_device = over->update_hub_device;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
int hcd_portnum;
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
+ unsigned int lpm_incapable:1;
};
struct xhci_hub {
struct usb_host_endpoint *ep);
int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
};
#define XHCI_CFC_DELAY 10
struct usb_host_endpoint *ep);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
break;
case USB_DEVICE_ID_CODEMERCS_IOW100:
- dev->report_size = 13;
+ dev->report_size = 12;
break;
}
}
#include "onboard_usb_hub.h"
+static void onboard_hub_attach_usb_driver(struct work_struct *work);
+
static struct usb_device_driver onboard_hub_usbdev_driver;
+static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
/************************** Platform driver **************************/
bool is_powered_on;
bool going_away;
struct list_head udev_list;
- struct work_struct attach_usb_driver_work;
struct mutex lock;
};
* This needs to be done deferred to avoid self-deadlocks on systems
* with nested onboard hubs.
*/
- INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
- schedule_work(&hub->attach_usb_driver_work);
+ schedule_work(&attach_usb_driver_work);
return 0;
}
hub->going_away = true;
- if (&hub->attach_usb_driver_work != current_work())
- cancel_work_sync(&hub->attach_usb_driver_work);
-
mutex_lock(&hub->lock);
/* unbind the USB devices to avoid dangling references to this device */
{
int ret;
- ret = platform_driver_register(&onboard_hub_driver);
+ ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
if (ret)
return ret;
- ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+ ret = platform_driver_register(&onboard_hub_driver);
if (ret)
- platform_driver_unregister(&onboard_hub_driver);
+ usb_deregister_device_driver(&onboard_hub_usbdev_driver);
return ret;
}
{
usb_deregister_device_driver(&onboard_hub_usbdev_driver);
platform_driver_unregister(&onboard_hub_driver);
+
+ cancel_work_sync(&attach_usb_driver_work);
}
module_exit(onboard_hub_exit);
memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
+ if (!res) {
+ ret = -EINVAL;
goto err2;
+ }
musb_res[i].start = res->start;
musb_res[i].end = res->end;
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
#define QUECTEL_PRODUCT_EM060K 0x030b
+#define QUECTEL_PRODUCT_EM05G_CS 0x030c
+#define QUECTEL_PRODUCT_EM05CN_SG 0x0310
#define QUECTEL_PRODUCT_EM05G_SG 0x0311
+#define QUECTEL_PRODUCT_EM05CN 0x0312
+#define QUECTEL_PRODUCT_EM05G_GR 0x0313
+#define QUECTEL_PRODUCT_EM05G_RS 0x0314
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
+#define QUECTEL_PRODUCT_EC200U 0x0901
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
+ .driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
+ .driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
flags |= US_FL_NO_ATA_1X;
+ /*
+	 * The RTL9210-based HIKSEMI MD202 enclosure reportedly has issues
+ * with UAS. This isn't distinguishable with just idVendor and
+ * idProduct, use manufacturer and product too.
+ *
+ * Reported-by: Hongling Zeng <zenghongling@kylinos.cn>
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x9210 &&
+ (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) &&
+ (udev->product && !strcmp(udev->product, "MD202")))
+ flags |= US_FL_IGNORE_UAS;
+
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) {
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
-/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
-UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
- "Hiksemi",
- "External HDD",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
-
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
[DP_PIN_ASSIGN_F] = "F",
};
+/*
+ * Helper function to extract a peripheral's currently supported
+ * Pin Assignments from its DisplayPort alternate mode state.
+ */
+static u8 get_current_pin_assignments(struct dp_altmode *dp)
+{
+ if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_UFP_U_AS_DFP_D)
+ return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
+ else
+ return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
+}
+
static ssize_t
pin_assignment_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
goto out_unlock;
}
- if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
- assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
- else
- assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+ assignments = get_current_pin_assignments(dp);
if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
ret = -EINVAL;
cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
- if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
- assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
- else
- assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+ assignments = get_current_pin_assignments(dp);
for (i = 0; assignments; assignments >>= 1, i++) {
if (assignments & 1) {
tcpm_set_state(port, ready_state(port), 0);
break;
case DR_SWAP_CHANGE_DR:
- if (port->data_role == TYPEC_HOST) {
- tcpm_unregister_altmodes(port);
+ tcpm_unregister_altmodes(port);
+ if (port->data_role == TYPEC_HOST)
tcpm_set_roles(port, true, port->pwr_role,
TYPEC_DEVICE);
- } else {
+ else
tcpm_set_roles(port, true, port->pwr_role,
TYPEC_HOST);
- }
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
struct ucsi_work {
struct delayed_work work;
+ struct list_head node;
unsigned long delay;
unsigned int count;
struct ucsi_connector *con;
mutex_lock(&con->lock);
if (!con->partner) {
+ list_del(&uwork->node);
mutex_unlock(&con->lock);
kfree(uwork);
return;
ret = uwork->cb(con);
- if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
+ if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
queue_delayed_work(con->wq, &uwork->work, uwork->delay);
- else
+ } else {
+ list_del(&uwork->node);
kfree(uwork);
+ }
mutex_unlock(&con->lock);
}
uwork->con = con;
uwork->cb = cb;
+ list_add_tail(&uwork->node, &con->partner_tasks);
queue_delayed_work(con->wq, &uwork->work, delay);
return 0;
INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
+ INIT_LIST_HEAD(&con->partner_tasks);
con->num = index + 1;
con->ucsi = ucsi;
ucsi_unregister_altmodes(&ucsi->connector[i],
UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(&ucsi->connector[i]);
- if (ucsi->connector[i].wq)
+
+ if (ucsi->connector[i].wq) {
+ struct ucsi_work *uwork;
+
+ mutex_lock(&ucsi->connector[i].lock);
+ /*
+ * queue delayed items immediately so they can execute
+ * and free themselves before the wq is destroyed
+ */
+ list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
+ mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
+ mutex_unlock(&ucsi->connector[i].lock);
destroy_workqueue(ucsi->connector[i].wq);
+ }
typec_unregister_port(ucsi->connector[i].port);
}
struct work_struct work;
struct completion complete;
struct workqueue_struct *wq;
+ struct list_head partner_tasks;
struct typec_port *port;
struct typec_partner *partner;
* significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
* hugetlbfs is in use.
*/
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
{
- struct page *pages;
int ret, order = get_order(PAGE_SIZE * 2);
+ struct vfio_iova *region;
+ struct page *pages;
+ dma_addr_t start;
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages)
return;
- ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
- if (!ret) {
- size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+ list_for_each_entry(region, regions, list) {
+ start = ALIGN(region->start, PAGE_SIZE * 2);
+ if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+ continue;
- if (unmapped == PAGE_SIZE)
- iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
- else
- domain->fgsp = true;
+ ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+ if (!ret) {
+ size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+ if (unmapped == PAGE_SIZE)
+ iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+ else
+ domain->fgsp = true;
+ }
+ break;
}
__free_pages(pages, order);
}
}
- vfio_test_domain_fgsp(domain);
+ vfio_test_domain_fgsp(domain, &iova_copy);
/* replay mappings on new domains */
ret = vfio_iommu_replay(iommu, domain);
/* remainder if it woke up early */
unsigned long jremain = 0;
+ atomic_inc(&dev->refcnt);
+
for (;;) {
if (!jremain && dev->search_count) {
*/
mutex_unlock(&dev->list_mutex);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
break;
+ }
/* Only sleep when the search is active. */
if (dev->search_count) {
dev->search_count = w1_search_count;
dev->enable_pullup = w1_enable_pullup;
- /* 1 for w1_process to decrement
- * 1 for __w1_remove_master_device to decrement
+ /* For __w1_remove_master_device to decrement
*/
- atomic_set(&dev->refcnt, 2);
+ atomic_set(&dev->refcnt, 1);
INIT_LIST_HEAD(&dev->slist);
INIT_LIST_HEAD(&dev->async_list);
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_file_extent_item *extent;
u64 extent_end;
+ u8 type;
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(leaf, extent);
- if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
- btrfs_file_extent_type(leaf, extent) ==
- BTRFS_FILE_EXTENT_PREALLOC) {
+ /*
+ * Can't access the extent's disk_bytenr field if this is an
+ * inline extent, since at that offset, it's where the extent
+ * data starts.
+ */
+ if (type == BTRFS_FILE_EXTENT_PREALLOC ||
+ (type == BTRFS_FILE_EXTENT_REG &&
+ btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
/*
* Explicit hole or prealloc extent, search for delalloc.
* A prealloc extent is treated like a hole.
int err = -ENOMEM;
int ret = 0;
bool stopped = false;
+ bool did_leaf_rescans = false;
path = btrfs_alloc_path();
if (!path)
}
err = qgroup_rescan_leaf(trans, path);
+ did_leaf_rescans = true;
if (err > 0)
btrfs_commit_transaction(trans);
mutex_unlock(&fs_info->qgroup_rescan_lock);
/*
- * only update status, since the previous part has already updated the
- * qgroup info.
+ * Only update status, since the previous part has already updated the
+ * qgroup info, and only if we did any actual work. This also prevents
+ * race with a concurrent quota disable, which has already set
+ * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+ * btrfs_quota_disable().
*/
- trans = btrfs_start_transaction(fs_info->quota_root, 1);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ if (did_leaf_rescans) {
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ trans = NULL;
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d",
+ err);
+ }
+ } else {
trans = NULL;
- btrfs_err(fs_info,
- "fail to start transaction for status update: %d",
- err);
}
mutex_lock(&fs_info->qgroup_rescan_lock);
return num_devices;
}
+static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
+ struct block_device *bdev, int copy_num)
+{
+ struct btrfs_super_block *disk_super;
+ const size_t len = sizeof(disk_super->magic);
+ const u64 bytenr = btrfs_sb_offset(copy_num);
+ int ret;
+
+ disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
+ if (IS_ERR(disk_super))
+ return;
+
+ memset(&disk_super->magic, 0, len);
+ folio_mark_dirty(virt_to_folio(disk_super));
+ btrfs_release_disk_super(disk_super);
+
+ ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
+ if (ret)
+ btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
+ copy_num, ret);
+}
+
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
struct block_device *bdev,
const char *device_path)
{
- struct btrfs_super_block *disk_super;
int copy_num;
if (!bdev)
return;
for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
- struct page *page;
- int ret;
-
- disk_super = btrfs_read_dev_one_super(bdev, copy_num, false);
- if (IS_ERR(disk_super))
- continue;
-
- if (bdev_is_zoned(bdev)) {
+ if (bdev_is_zoned(bdev))
btrfs_reset_sb_log_zones(bdev, copy_num);
- continue;
- }
-
- memset(&disk_super->magic, 0, sizeof(disk_super->magic));
-
- page = virt_to_page(disk_super);
- set_page_dirty(page);
- lock_page(page);
- /* write_on_page() unlocks the page */
- ret = write_one_page(page);
- if (ret)
- btrfs_warn(fs_info,
- "error clearing superblock number %d (%d)",
- copy_num, ret);
- btrfs_release_disk_super(disk_super);
-
+ else
+ btrfs_scratch_superblock(fs_info, bdev, copy_num);
}
/* Notify udev that device has changed */
list_for_each_entry(t, &ce->tlist, list) {
seq_printf(m, " %s%s\n",
t->name,
- ce->tgthint == t ? " (target hint)" : "");
+ READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
}
}
}
cifs_dbg(FYI, "target list:\n");
list_for_each_entry(t, &ce->tlist, list) {
cifs_dbg(FYI, " %s%s\n", t->name,
- ce->tgthint == t ? " (target hint)" : "");
+ READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
}
}
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
- struct cache_dfs_tgt *t = ce->tgthint;
+ struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
return t ? t->name : ERR_PTR(-ENOENT);
}
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
struct cache_entry *ce, const char *tgthint)
{
+ struct cache_dfs_tgt *target;
int i;
ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
ce->numtgts++;
}
- ce->tgthint = list_first_entry_or_null(&ce->tlist,
- struct cache_dfs_tgt, list);
+ target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
+ list);
+ WRITE_ONCE(ce->tgthint, target);
return 0;
}
}
/* Add a new DFS cache entry */
-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+ int numrefs)
{
int rc;
struct cache_entry *ce;
rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
if (rc)
- return rc;
+ return ERR_PTR(rc);
ce = alloc_cache_entry(refs, numrefs);
if (IS_ERR(ce))
- return PTR_ERR(ce);
+ return ce;
spin_lock(&cache_ttl_lock);
if (!cache_ttl) {
atomic_inc(&cache_count);
- return 0;
+ return ce;
}
/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
*
* Use whole path components in the match. Must be called with htable_rw_lock held.
*
+ * Return the cached entry if successful.
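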
* Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return error ptr otherwise.
*/
static struct cache_entry *lookup_cache_entry(const char *path)
{
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
int numrefs)
{
+ struct cache_dfs_tgt *target;
+ char *th = NULL;
int rc;
- char *s, *th = NULL;
WARN_ON(!rwsem_is_locked(&htable_rw_lock));
- if (ce->tgthint) {
- s = ce->tgthint->name;
- th = kstrdup(s, GFP_ATOMIC);
+ target = READ_ONCE(ce->tgthint);
+ if (target) {
+ th = kstrdup(target->name, GFP_ATOMIC);
if (!th)
return -ENOMEM;
}
*
* For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
* handle them properly.
+ *
+ * On success, return the entry with the read lock held; otherwise an error ptr.
*/
-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+ struct cifs_ses *ses,
+ const char *path,
+ bool force_refresh)
{
- int rc;
- struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
+ struct cache_entry *ce;
int numrefs = 0;
- bool newent = false;
+ int rc;
cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
- down_write(&htable_rw_lock);
+ down_read(&htable_rw_lock);
ce = lookup_cache_entry(path);
if (!IS_ERR(ce)) {
- if (!cache_entry_expired(ce)) {
- dump_ce(ce);
- up_write(&htable_rw_lock);
- return 0;
- }
- } else {
- newent = true;
+ if (!force_refresh && !cache_entry_expired(ce))
+ return ce;
+ } else if (PTR_ERR(ce) != -ENOENT) {
+ up_read(&htable_rw_lock);
+ return ce;
}
/*
- * Either the entry was not found, or it is expired.
+ * Unlock shared access as we don't want to hold any locks while getting
+ * a new referral. The @ses used for performing the I/O could be
+ * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+ * in order to failover -- if necessary.
+ */
+ up_read(&htable_rw_lock);
+
+ /*
+ * Either the entry was not found, or it is expired, or it is a forced
+ * refresh.
* Request a new DFS referral in order to create or update a cache entry.
*/
rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
- if (rc)
- goto out_unlock;
+ if (rc) {
+ ce = ERR_PTR(rc);
+ goto out;
+ }
dump_refs(refs, numrefs);
- if (!newent) {
- rc = update_cache_entry_locked(ce, refs, numrefs);
- goto out_unlock;
+ down_write(&htable_rw_lock);
+ /* Re-check as another task might have it added or refreshed already */
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (force_refresh || cache_entry_expired(ce)) {
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ if (rc)
+ ce = ERR_PTR(rc);
+ }
+ } else if (PTR_ERR(ce) == -ENOENT) {
+ ce = add_cache_entry_locked(refs, numrefs);
}
- rc = add_cache_entry_locked(refs, numrefs);
+ if (IS_ERR(ce)) {
+ up_write(&htable_rw_lock);
+ goto out;
+ }
-out_unlock:
- up_write(&htable_rw_lock);
+ downgrade_write(&htable_rw_lock);
+out:
free_dfs_info_array(refs, numrefs);
- return rc;
+ return ce;
}
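The downgrade_write() on the success path is what makes the new contract ("return entry with the read lock held") safe: the writer inserts or updates the entry under the exclusive lock, then atomically converts its hold into a shared one, so the entry cannot be evicted before the caller has finished with it. The shape of the pattern, as a sketch with hypothetical names:

#include <linux/err.h>
#include <linux/rwsem.h>

struct cache_obj;
struct cache_obj *find_or_create_obj(void);	/* hypothetical helper */

static struct cache_obj *get_obj_read_locked(struct rw_semaphore *sem)
{
	struct cache_obj *obj;

	down_write(sem);
	obj = find_or_create_obj();
	if (IS_ERR(obj)) {
		up_write(sem);
		return obj;
	}
	downgrade_write(sem);	/* exclusive hold becomes shared, atomically */
	return obj;		/* caller must up_read(sem) when done */
}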
/*
}
it->it_path_consumed = t->path_consumed;
- if (ce->tgthint == t)
+ if (READ_ONCE(ce->tgthint) == t)
list_add(&it->it_list, head);
else
list_add_tail(&it->it_list, head);
if (IS_ERR(npath))
return PTR_ERR(npath);
- rc = cache_refresh_path(xid, ses, npath);
- if (rc)
- goto out_free_path;
-
- down_read(&htable_rw_lock);
-
- ce = lookup_cache_entry(npath);
+ ce = cache_refresh_path(xid, ses, npath, false);
if (IS_ERR(ce)) {
- up_read(&htable_rw_lock);
rc = PTR_ERR(ce);
goto out_free_path;
}
}
/**
- * dfs_cache_update_tgthint - update target hint of a DFS cache entry
- *
- * If it doesn't find the cache entry, then it will get a DFS referral for @path
- * and create a new entry.
- *
- * In case the cache entry exists but expired, it will get a DFS referral
- * for @path and then update the respective cache entry.
- *
- * @xid: syscall id
- * @ses: smb session
- * @cp: codepage
- * @remap: type of character remapping for paths
- * @path: path to lookup in DFS referral cache
- * @it: DFS target iterator
- *
- * Return zero if the target hint was updated successfully, otherwise non-zero.
- */
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *cp, int remap, const char *path,
- const struct dfs_cache_tgt_iterator *it)
-{
- int rc;
- const char *npath;
- struct cache_entry *ce;
- struct cache_dfs_tgt *t;
-
- npath = dfs_cache_canonical_path(path, cp, remap);
- if (IS_ERR(npath))
- return PTR_ERR(npath);
-
- cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
-
- rc = cache_refresh_path(xid, ses, npath);
- if (rc)
- goto out_free_path;
-
- down_write(&htable_rw_lock);
-
- ce = lookup_cache_entry(npath);
- if (IS_ERR(ce)) {
- rc = PTR_ERR(ce);
- goto out_unlock;
- }
-
- t = ce->tgthint;
-
- if (likely(!strcasecmp(it->it_name, t->name)))
- goto out_unlock;
-
- list_for_each_entry(t, &ce->tlist, list) {
- if (!strcasecmp(t->name, it->it_name)) {
- ce->tgthint = t;
- cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
- it->it_name);
- break;
- }
- }
-
-out_unlock:
- up_write(&htable_rw_lock);
-out_free_path:
- kfree(npath);
- return rc;
-}
-
-/**
* dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
* without sending any requests to the currently connected server.
*
cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
- if (!down_write_trylock(&htable_rw_lock))
- return;
+ down_read(&htable_rw_lock);
ce = lookup_cache_entry(path);
if (IS_ERR(ce))
goto out_unlock;
- t = ce->tgthint;
+ t = READ_ONCE(ce->tgthint);
if (unlikely(!strcasecmp(it->it_name, t->name)))
goto out_unlock;
list_for_each_entry(t, &ce->tlist, list) {
if (!strcasecmp(t->name, it->it_name)) {
- ce->tgthint = t;
+ WRITE_ONCE(ce->tgthint, t);
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
}
out_unlock:
- up_write(&htable_rw_lock);
+ up_read(&htable_rw_lock);
}
/**
* Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
* target shares in @refs.
*/
-static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
- const struct dfs_info3_param *refs, int numrefs)
+static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+ struct dfs_cache_tgt_list *old_tl,
+ struct dfs_cache_tgt_list *new_tl)
{
- struct dfs_cache_tgt_iterator *it;
- int i;
-
- for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
- for (i = 0; i < numrefs; i++) {
- if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
- refs[i].node_name))
+ struct dfs_cache_tgt_iterator *oit, *nit;
+
+ for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
+ oit = dfs_cache_get_next_tgt(old_tl, oit)) {
+ for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
+ nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+ if (target_share_equal(server,
+ dfs_cache_get_tgt_name(oit),
+ dfs_cache_get_tgt_name(nit)))
return;
}
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+ cifs_signal_cifsd_for_reconnect(server, true);
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
{
- struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+ struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
struct cifs_tcon *ipc = ses->tcon_ipc;
- struct dfs_info3_param *refs = NULL;
bool needs_refresh = false;
struct cache_entry *ce;
unsigned int xid;
- int numrefs = 0;
int rc = 0;
xid = get_xid();
ce = lookup_cache_entry(path);
needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
if (!IS_ERR(ce)) {
- rc = get_targets(ce, &tl);
- if (rc)
- cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+ rc = get_targets(ce, &old_tl);
+ cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
}
up_read(&htable_rw_lock);
}
spin_unlock(&ipc->tc_lock);
- rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
- if (!rc) {
- /* Create or update a cache entry with the new referral */
- dump_refs(refs, numrefs);
-
- down_write(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- if (IS_ERR(ce))
- add_cache_entry_locked(refs, numrefs);
- else if (force_refresh || cache_entry_expired(ce))
- update_cache_entry_locked(ce, refs, numrefs);
- up_write(&htable_rw_lock);
-
- mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+ ce = cache_refresh_path(xid, ses, path, true);
+ if (!IS_ERR(ce)) {
+ rc = get_targets(ce, &new_tl);
+ up_read(&htable_rw_lock);
+ cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+ mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
}
out:
free_xid(xid);
- dfs_cache_free_tgts(&tl);
- free_dfs_info_array(refs, numrefs);
+ dfs_cache_free_tgts(&old_tl);
+ dfs_cache_free_tgts(&new_tl);
return rc;
}
struct dfs_cache_tgt_list *tgt_list);
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
struct dfs_cache_tgt_list *tgt_list);
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *cp, int remap, const char *path,
- const struct dfs_cache_tgt_iterator *it);
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
struct dfs_info3_param *ref);
(struct smb2_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
- .rq_nvec = 1,
- .rq_pages = rdata->pages,
- .rq_offset = rdata->page_offset,
- .rq_npages = rdata->nr_pages,
- .rq_pagesz = rdata->pagesz,
- .rq_tailsz = rdata->tailsz };
+ .rq_nvec = 1, };
+
+ if (rdata->got_bytes) {
+ rqst.rq_pages = rdata->pages;
+ rqst.rq_offset = rdata->page_offset;
+ rqst.rq_npages = rdata->nr_pages;
+ rqst.rq_pagesz = rdata->pagesz;
+ rqst.rq_tailsz = rdata->tailsz;
+ }
WARN_ONCE(rdata->server != mid->server,
"rdata server %p != mid server %p",
struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
size_t value_count);
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value,
+ size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);
static const struct xattr_handler * const ext4_xattr_handler_map[] = {
tmp_data = cpu_to_le32(hash);
e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
&tmp_data, 1);
+ /* All good? */
+ if (e_hash == entry->e_hash)
+ return 0;
+
+ /*
+ * Not good. Maybe the entry hash was calculated
+ * using the buggy signed char version?
+ */
+ e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
+ &tmp_data, 1);
+ /* Still no match - bad */
if (e_hash != entry->e_hash)
return -EFSCORRUPTED;
+
+ /* Let people know about old hash */
+ pr_warn_once("ext4: filesystem with signed xattr name hash");
}
return 0;
}
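A minimal userspace sketch of why the fallback is needed (values illustrative): for any name byte above 0x7f, a platform where char is signed sign-extends the byte before the XOR, flipping the high bits of the accumulator, so the two hash variants diverge.

#include <stdio.h>

#define NAME_HASH_SHIFT 5

int main(void)
{
	unsigned int h = 0;
	char c = (char)0xe4;		/* e.g. a byte of a UTF-8 xattr name */
	unsigned int h_unsigned, h_signed;

	h_unsigned = (h << NAME_HASH_SHIFT) ^
		     (h >> (8 * sizeof(h) - NAME_HASH_SHIFT)) ^
		     (unsigned char)c;		/* 0x000000e4 */
	h_signed   = (h << NAME_HASH_SHIFT) ^
		     (h >> (8 * sizeof(h) - NAME_HASH_SHIFT)) ^
		     (signed char)c;		/* sign-extends: 0xffffffe4 */

	printf("unsigned %#010x, signed %#010x\n", h_unsigned, h_signed);
	return 0;
}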
while (name_len--) {
hash = (hash << NAME_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
- *name++;
+ (unsigned char)*name++;
+ }
+ while (value_count--) {
+ hash = (hash << VALUE_HASH_SHIFT) ^
+ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+ le32_to_cpu(*value++);
+ }
+ return cpu_to_le32(hash);
+}
+
+/*
+ * ext4_xattr_hash_entry_signed()
+ *
+ * Compute the hash of an extended attribute the old, buggy way: name
+ * bytes are sign-extended, matching hashes written by kernels built
+ * on platforms where char is signed.
+ */
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count)
+{
+ __u32 hash = 0;
+
+ while (name_len--) {
+ hash = (hash << NAME_HASH_SHIFT) ^
+ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+ (signed char)*name++;
}
while (value_count--) {
hash = (hash << VALUE_HASH_SHIFT) ^
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
+static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
+ struct user_namespace *mnt_userns,
+ struct inode *inode, int type, bool rcu)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
int size;
const char *name;
void *value = NULL;
if (fuse_is_bad(inode))
return ERR_PTR(-EIO);
- if (!fc->posix_acl || fc->no_getxattr)
+ if (fc->no_getxattr)
return NULL;
if (type == ACL_TYPE_ACCESS)
return acl;
}
+static inline bool fuse_no_acl(const struct fuse_conn *fc,
+ const struct inode *inode)
+{
+ /*
+ * Refuse interacting with POSIX ACLs for daemons that
+ * don't support FUSE_POSIX_ACL and are not mounted on
+ * the host to retain backwards compatibility.
+ */
+ return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns);
+}
+
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, int type)
+{
+ struct inode *inode = d_inode(dentry);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ if (fuse_no_acl(fc, inode))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __fuse_get_acl(fc, mnt_userns, inode, type, false);
+}
+
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ /*
+ * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set
+ * POSIX ACLs without them being used for permission checking by the
+ * vfs. Retain that behavior for backwards compatibility as there are
+ * filesystems that do all permission checking for acls in the daemon
+ * and not in the kernel.
+ */
+ if (!fc->posix_acl)
+ return NULL;
+
+ return __fuse_get_acl(fc, &init_user_ns, inode, type, rcu);
+}
+
int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
struct posix_acl *acl, int type)
{
if (fuse_is_bad(inode))
return -EIO;
- if (!fc->posix_acl || fc->no_setxattr)
+ if (fc->no_setxattr || fuse_no_acl(fc, inode))
return -EOPNOTSUPP;
if (type == ACL_TYPE_ACCESS)
return ret;
}
- if (!vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
+ /*
+ * Fuse daemons without FUSE_POSIX_ACL never changed the passed
+ * through POSIX ACLs. Such daemons don't expect setgid bits to
+ * be stripped.
+ */
+ if (fc->posix_acl &&
+ !vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
!capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
} else {
ret = fuse_removexattr(inode, name);
}
- forget_all_cached_acls(inode);
- fuse_invalidate_attr(inode);
+
+ if (fc->posix_acl) {
+ /*
+ * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs
+ * and didn't invalidate attributes. Retain that behavior.
+ */
+ forget_all_cached_acls(inode);
+ fuse_invalidate_attr(inode);
+ }
return ret;
}
.permission = fuse_permission,
.getattr = fuse_getattr,
.listxattr = fuse_listxattr,
- .get_inode_acl = fuse_get_acl,
+ .get_inode_acl = fuse_get_inode_acl,
+ .get_acl = fuse_get_acl,
.set_acl = fuse_set_acl,
.fileattr_get = fuse_fileattr_get,
.fileattr_set = fuse_fileattr_set,
.permission = fuse_permission,
.getattr = fuse_getattr,
.listxattr = fuse_listxattr,
- .get_inode_acl = fuse_get_acl,
+ .get_inode_acl = fuse_get_inode_acl,
+ .get_acl = fuse_get_acl,
.set_acl = fuse_set_acl,
.fileattr_get = fuse_fileattr_get,
.fileattr_set = fuse_fileattr_set,
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
extern const struct xattr_handler *fuse_xattr_handlers[];
-extern const struct xattr_handler *fuse_acl_xattr_handlers[];
-extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];
struct posix_acl;
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, int type);
int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
struct posix_acl *acl, int type);
fuse_dax_dontcache(inode, attr->flags);
}
-static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_conn *fc)
{
inode->i_mode = attr->mode & S_IFMT;
inode->i_size = attr->size;
new_decode_dev(attr->rdev));
} else
BUG();
+ /*
+ * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
+ * so they see the exact same behavior as before.
+ */
+ if (!fc->posix_acl)
+ inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
if (!inode)
return NULL;
- fuse_init_inode(inode, attr);
+ fuse_init_inode(inode, attr, fc);
get_fuse_inode(inode)->nodeid = nodeid;
inode->i_flags |= S_AUTOMOUNT;
goto done;
if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;
inode->i_generation = generation;
- fuse_init_inode(inode, attr);
+ fuse_init_inode(inode, attr, fc);
unlock_new_inode(inode);
} else if (fuse_stale_inode(inode, generation, attr)) {
/* nodeid was reused, any I/O on the old inode should fail */
if ((flags & FUSE_POSIX_ACL)) {
fc->default_permissions = 1;
fc->posix_acl = 1;
- fm->sb->s_xattr = fuse_acl_xattr_handlers;
}
if (flags & FUSE_CACHE_SYMLINKS)
fc->cache_symlinks = 1;
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
-
- /*
- * If we are not in the initial user namespace posix
- * acls must be translated.
- */
- if (sb->s_user_ns != &init_user_ns)
- sb->s_xattr = fuse_no_acl_xattr_handlers;
}
static int fuse_fill_super_submount(struct super_block *sb,
return fuse_setxattr(inode, name, value, size, flags, 0);
}
-static bool no_xattr_list(struct dentry *dentry)
-{
- return false;
-}
-
-static int no_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *value, size_t size)
-{
- return -EOPNOTSUPP;
-}
-
-static int no_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
- struct dentry *dentry, struct inode *nodee,
- const char *name, const void *value,
- size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
static const struct xattr_handler fuse_xattr_handler = {
.prefix = "",
.get = fuse_xattr_get,
&fuse_xattr_handler,
NULL
};
-
-const struct xattr_handler *fuse_acl_xattr_handlers[] = {
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
- &fuse_xattr_handler,
- NULL
-};
-
-static const struct xattr_handler fuse_no_acl_access_xattr_handler = {
- .name = XATTR_NAME_POSIX_ACL_ACCESS,
- .flags = ACL_TYPE_ACCESS,
- .list = no_xattr_list,
- .get = no_xattr_get,
- .set = no_xattr_set,
-};
-
-static const struct xattr_handler fuse_no_acl_default_xattr_handler = {
- .name = XATTR_NAME_POSIX_ACL_DEFAULT,
- .flags = ACL_TYPE_ACCESS,
- .list = no_xattr_list,
- .get = no_xattr_get,
- .set = no_xattr_set,
-};
-
-const struct xattr_handler *fuse_no_acl_xattr_handlers[] = {
- &fuse_no_acl_access_xattr_handler,
- &fuse_no_acl_default_xattr_handler,
- &fuse_xattr_handler,
- NULL
-};
brelse(bd->bd_bh);
}
+static int __gfs2_writepage(struct page *page, struct writeback_control *wbc,
+ void *data)
+{
+ struct address_space *mapping = data;
+ int ret = mapping->a_ops->writepage(page, wbc);
+ mapping_set_error(mapping, ret);
+ return ret;
+}
+
/**
* gfs2_ail1_start_one - Start I/O on a transaction
* @sdp: The superblock
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = filemap_fdatawrite_wbc(mapping, wbc);
+ ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
};
/**
+ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
+ * @nf: nfsd_file to attempt to queue
+ * @dispose: private list to queue successfully-put objects
+ *
+ * Unhash an nfsd_file, try to get a reference to it, and then put that
+ * reference. If it's the last reference, queue it to the dispose list.
+ */
+static void
+nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+ __must_hold(RCU)
+{
+ int decrement = 1;
+
+ /* If we raced with someone else unhashing, ignore it */
+ if (!nfsd_file_unhash(nf))
+ return;
+
+ /* If we can't get a reference, ignore it */
+ if (!nfsd_file_get(nf))
+ return;
+
+ /* Extra decrement if we remove from the LRU */
+ if (nfsd_file_lru_remove(nf))
+ ++decrement;
+
+ /* If refcount goes to 0, then put on the dispose list */
+ if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+ list_add(&nf->nf_lru, dispose);
+ trace_nfsd_file_closing(nf);
+ }
+}
+
+/**
* nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
* @inode: inode on which to close out nfsd_files
* @dispose: list on which to gather nfsd_files to close out
rcu_read_lock();
do {
- int decrement = 1;
-
nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
nfsd_file_rhash_params);
if (!nf)
break;
-
- /* If we raced with someone else unhashing, ignore it */
- if (!nfsd_file_unhash(nf))
- continue;
-
- /* If we can't get a reference, ignore it */
- if (!nfsd_file_get(nf))
- continue;
-
- /* Extra decrement if we remove from the LRU */
- if (nfsd_file_lru_remove(nf))
- ++decrement;
-
- /* If refcount goes to 0, then put on the dispose list */
- if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
- list_add(&nf->nf_lru, dispose);
- trace_nfsd_file_closing(nf);
- }
+ nfsd_file_cond_queue(nf, dispose);
} while (1);
rcu_read_unlock();
}
nf = rhashtable_walk_next(&iter);
while (!IS_ERR_OR_NULL(nf)) {
- if (!net || nf->nf_net == net) {
- nfsd_file_unhash(nf);
- nfsd_file_lru_remove(nf);
- list_add(&nf->nf_lru, &dispose);
- }
+ if (!net || nf->nf_net == net)
+ nfsd_file_cond_queue(nf, &dispose);
nf = rhashtable_walk_next(&iter);
}
const u64 address,
const enum zynqmp_pm_request_ack ack);
int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
-int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1);
-int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
u32 value);
return -ENODEV;
}
-static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
{
return -ENODEV;
}
-static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
{
return -ENODEV;
}
#ifndef __ASSEMBLER__
#include <linux/types.h>
-#ifdef CONFIG_ARCH_OMAP1_ANY
+#ifdef CONFIG_ARCH_OMAP1
/*
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
extern void omap_writeb(u8 v, u32 pa);
extern void omap_writew(u16 v, u32 pa);
extern void omap_writel(u32 v, u32 pa);
-#else
+#elif defined(CONFIG_COMPILE_TEST)
static inline u8 omap_readb(u32 pa) { return 0; }
static inline u16 omap_readw(u32 pa) { return 0; }
static inline u32 omap_readl(u32 pa) { return 0; }
}
/**
- * usb_set_intfdata() - associate driver-specific data with the interface
- * @intf: the usb interface
- * @data: pointer to the device priv structure or %NULL
+ * usb_set_intfdata() - associate driver-specific data with an interface
+ * @intf: USB interface
+ * @data: driver data
*
- * Drivers should use this function in their probe() to associate their
- * driver-specific data with the usb interface.
+ * Drivers can use this function in their probe() callbacks to associate
+ * driver-specific data with an interface.
*
- * When disconnecting, the core will take care of setting @intf back to %NULL,
- * so no actions are needed on the driver side. The interface should not be set
- * to %NULL before all actions completed (e.g. no outsanding URB remaining).
+ * Note that there is generally no need to clear the driver-data pointer even
+ * if some drivers do so for historical or implementation-specific reasons.
*/
static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
{
extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable);
extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
#else
static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable) { return 0; }
static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{ return true; }
+static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+ { return 0; }
#endif
/* USB autosuspend and autoresume */
};
};
+#define MANA_IRQ_NAME_SZ 32
+
struct gdma_irq_context {
void (*handler)(void *arg);
void *arg;
+ char name[MANA_IRQ_NAME_SZ];
};
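For context on why the name buffer is embedded in the struct: request_irq() keeps the name pointer for as long as the handler stays registered, so it must not point at stack (or otherwise shorter-lived) memory. A sketch of the presumed usage; the helper, the handler parameter, and the format string are assumptions, not the driver's exact code:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_request_named_irq(struct pci_dev *pdev, int irq,
				     irq_handler_t handler,
				     struct gdma_irq_context *gic,
				     unsigned int i)
{
	/* distinct, stable name shown in /proc/interrupts */
	snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%u@pci:%s",
		 i, pci_name(pdev));
	return request_irq(irq, handler, 0, gic->name, gic);
}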
struct gdma_context {
extern struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
uint16_t, int, int, uint32_t, unsigned int);
+void iscsi_session_remove(struct iscsi_cls_session *cls_session);
+void iscsi_session_free(struct iscsi_cls_session *cls_session);
extern void iscsi_session_teardown(struct iscsi_cls_session *);
extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
#define RPI_FIRMWARE_CLK_RATE_REQUEST(_id) \
{ \
- .id = _id, \
+ .id = cpu_to_le32(_id), \
}
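The conversion matters because the initializer fills a little-endian field of the firmware mailbox request: the value has to be stored in wire format regardless of host byte order, so a bare host-endian assignment is wrong on big-endian kernels (and trips sparse's __le32 checking). Illustrative use; the struct layout here is an assumption:

#include <linux/types.h>

struct rpi_firmware_clk_rate_request {
	__le32 id;	/* firmware expects little-endian on the wire */
	__le32 rate;
};

/* .id is stored little-endian no matter the host byte order */
struct rpi_firmware_clk_rate_request req =
	RPI_FIRMWARE_CLK_RATE_REQUEST(3 /* hypothetical clock id */);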
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
SCTP_CONNTRACK_HEARTBEAT_SENT,
- SCTP_CONNTRACK_HEARTBEAT_ACKED,
- SCTP_CONNTRACK_DATA_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
SCTP_CONNTRACK_MAX
};
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
- CTA_TIMEOUT_SCTP_DATA_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
* @clk_scaling_lock: used to serialize device commands and clock scaling
* @desc_size: descriptor sizes reported by device
* @scsi_block_reqs_cnt: reference counting for scsi block requests
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
+ struct mutex wb_mutex;
struct rw_semaphore clk_scaling_lock;
unsigned char desc_size[QUERY_DESC_IDN_MAX];
atomic_t scsi_block_reqs_cnt;
appended after any matching localversion* files, and after the value
set in CONFIG_LOCALVERSION.
- (The actual string used here is the first eight characters produced
+ (The actual string used here is the first 12 characters produced
by running the command:
$ git rev-parse --verify HEAD
#include <generated/compile.h>
#include <generated/utsrelease.h>
-#include <linux/version.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/uts.h>
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
&& !(ctx->flags & IORING_SETUP_R_DISABLED))
- ctx->submitter_task = get_task_struct(current);
+ WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
return -EBADFD;
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
- ctx->submitter_task = get_task_struct(current);
+ WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
if (ctx->restrictions.registered)
ctx->restricted = 1;
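These WRITE_ONCE() annotations pair with the READ_ONCE(ctx->submitter_task) in the msg_ring changes further down, which now inspect the pointer without holding the ring lock. Reduced to a sketch with a hypothetical struct:

#include <linux/compiler.h>

struct example_ctx {
	struct task_struct *submitter_task;
};

/* writer, under its own locking */
static void publish_submitter(struct example_ctx *ctx, struct task_struct *t)
{
	WRITE_ONCE(ctx->submitter_task, t);
}

/* lockless reader: sees NULL or a fully stored pointer, never a torn one */
static struct task_struct *peek_submitter(struct example_ctx *ctx)
{
	return READ_ONCE(ctx->submitter_task);
}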
u32 flags;
};
+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+ mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
+{
+ /*
+ * To ensure proper ordering between the two ctxs, we can only
+ * attempt a trylock on the target. If that fails and we already have
+ * the source ctx lock, punt to io-wq.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ if (!mutex_trylock(&octx->uring_lock))
+ return -EAGAIN;
+ return 0;
+ }
+ mutex_lock(&octx->uring_lock);
+ return 0;
+}
+
void io_msg_ring_cleanup(struct io_kiocb *req)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
msg->src_file = NULL;
}
+static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
+{
+ if (!target_ctx->task_complete)
+ return false;
+ return current != target_ctx->submitter_task;
+}
+
+static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+{
+ struct io_ring_ctx *ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+ if (unlikely(!task))
+ return -EOWNERDEAD;
+
+ init_task_work(&msg->tw, func);
+ if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+ return -EOWNERDEAD;
+
+ return IOU_ISSUE_SKIP_COMPLETE;
+}
+
static void io_msg_tw_complete(struct callback_head *head)
{
struct io_msg *msg = container_of(head, struct io_msg, tw);
struct io_ring_ctx *target_ctx = req->file->private_data;
int ret = 0;
- if (current->flags & PF_EXITING)
+ if (current->flags & PF_EXITING) {
ret = -EOWNERDEAD;
- else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = -EOVERFLOW;
+ } else {
+ /*
+ * If the target ring is using IOPOLL mode, then we need to be
+ * holding the uring_lock for posting completions. Other ring
+ * types rely on the regular completion locking, which is
+ * handled while posting.
+ */
+ if (target_ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_lock(&target_ctx->uring_lock);
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = -EOVERFLOW;
+ if (target_ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_unlock(&target_ctx->uring_lock);
+ }
if (ret < 0)
req_set_fail(req);
io_req_queue_tw_complete(req, ret);
}
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ int ret;
if (msg->src_fd || msg->dst_fd || msg->flags)
return -EINVAL;
+ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+ return -EBADFD;
- if (target_ctx->task_complete && current != target_ctx->submitter_task) {
- init_task_work(&msg->tw, io_msg_tw_complete);
- if (task_work_add(target_ctx->submitter_task, &msg->tw,
- TWA_SIGNAL_NO_IPI))
- return -EOWNERDEAD;
-
- atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
-
- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- return 0;
+ if (io_msg_need_remote(target_ctx))
+ return io_msg_exec_remote(req, io_msg_tw_complete);
- return -EOVERFLOW;
-}
-
-static void io_double_unlock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- mutex_unlock(&octx->uring_lock);
-}
-
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- /*
- * To ensure proper ordering between the two ctxs, we can only
- * attempt a trylock on the target. If that fails and we already have
- * the source ctx lock, punt to io-wq.
- */
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- if (!mutex_trylock(&octx->uring_lock))
+ ret = -EOVERFLOW;
+ if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+ if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
return -EAGAIN;
- return 0;
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = 0;
+ io_double_unlock_ctx(target_ctx);
+ } else {
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = 0;
}
- mutex_lock(&octx->uring_lock);
- return 0;
+ return ret;
}
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
ret = -EOVERFLOW;
out_unlock:
- io_double_unlock_ctx(target_ctx, issue_flags);
+ io_double_unlock_ctx(target_ctx);
return ret;
}
if (target_ctx == ctx)
return -EINVAL;
+ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+ return -EBADFD;
if (!src_file) {
src_file = io_msg_grab_file(req, issue_flags);
if (!src_file)
req->flags |= REQ_F_NEED_CLEANUP;
}
- if (target_ctx->task_complete && current != target_ctx->submitter_task) {
- init_task_work(&msg->tw, io_msg_tw_fd_complete);
- if (task_work_add(target_ctx->submitter_task, &msg->tw,
- TWA_SIGNAL))
- return -EOWNERDEAD;
-
- return IOU_ISSUE_SKIP_COMPLETE;
- }
+ if (io_msg_need_remote(target_ctx))
+ return io_msg_exec_remote(req, io_msg_tw_fd_complete);
return io_msg_install_complete(req, issue_flags);
}
switch (msg->cmd) {
case IORING_MSG_DATA:
- ret = io_msg_ring_data(req);
+ ret = io_msg_ring_data(req, issue_flags);
break;
case IORING_MSG_SEND_FD:
ret = io_msg_send_fd(req, issue_flags);
* to the waitqueue, so if we get nothing back, we
* should be safe and attempt a reissue.
*/
- if (unlikely(!req->cqe.res))
+ if (unlikely(!req->cqe.res)) {
+ /* Multishot armed need not reissue */
+ if (!(req->apoll_events & EPOLLONESHOT))
+ continue;
return IOU_POLL_REISSUE;
+ }
}
if (req->apoll_events & EPOLLONESHOT)
return IOU_POLL_DONE;
arch/$SRCARCH/include/
"
+type cpio > /dev/null
+
# Support incremental builds by skipping archive generation
# if timestamps of files being archived are not changed.
sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
- ret = !mod || mod->state == MODULE_STATE_LIVE;
+ ret = !mod || mod->state == MODULE_STATE_LIVE
+ || mod->state == MODULE_STATE_GOING;
mutex_unlock(&module_mutex);
return ret;
mod->state = MODULE_STATE_UNFORMED;
-again:
mutex_lock(&module_mutex);
old = find_module_all(mod->name, strlen(mod->name), true);
if (old != NULL) {
- if (old->state != MODULE_STATE_LIVE) {
+ if (old->state == MODULE_STATE_COMING
+ || old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
finished_loading(mod->name));
if (err)
goto out_unlocked;
- goto again;
+
+ /* The module might have gone in the meantime. */
+ mutex_lock(&module_mutex);
+ old = find_module_all(mod->name, strlen(mod->name),
+ true);
}
- err = -EEXIST;
+
+ /*
+ * We are here only when the same module was being loaded. Do
+	 * not try to load it again right now. This prevents long delays
+	 * caused by serialized module load failures, which can happen
+	 * when many devices of the same type each trigger a load of
+	 * the same module.
+ */
+ if (old && old->state == MODULE_STATE_LIVE)
+ err = -EEXIST;
+ else
+ err = -EBUSY;
goto out;
}
mod_update_bounds(mod);
if (retval)
goto out_put_task;
+ /*
+ * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+ * alloc_user_cpus_ptr() returns NULL.
+ */
user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
- if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+ if (user_mask) {
+ cpumask_copy(user_mask, in_mask);
+ } else if (IS_ENABLED(CONFIG_SMP)) {
retval = -ENOMEM;
goto out_put_task;
}
- cpumask_copy(user_mask, in_mask);
+
ac = (struct affinity_context){
.new_mask = in_mask,
.user_mask = user_mask,
eenv_task_busy_time(&eenv, p, prev_cpu);
for (; pd; pd = pd->next) {
+ unsigned long util_min = p_util_min, util_max = p_util_max;
unsigned long cpu_cap, cpu_thermal_cap, util;
unsigned long cur_delta, max_spare_cap = 0;
unsigned long rq_util_min, rq_util_max;
- unsigned long util_min, util_max;
unsigned long prev_spare_cap = 0;
int max_spare_cap_cpu = -1;
unsigned long base_energy;
eenv.pd_cap = 0;
for_each_cpu(cpu, cpus) {
+ struct rq *rq = cpu_rq(cpu);
+
eenv.pd_cap += cpu_thermal_cap;
if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
* much capacity we can get out of the CPU; this is
* aligned with sched_cpu_util().
*/
- if (uclamp_is_used()) {
- if (uclamp_rq_is_idle(cpu_rq(cpu))) {
- util_min = p_util_min;
- util_max = p_util_max;
- } else {
- /*
- * Open code uclamp_rq_util_with() except for
- * the clamp() part. Ie: apply max aggregation
- * only. util_fits_cpu() logic requires to
- * operate on non clamped util but must use the
- * max-aggregated uclamp_{min, max}.
- */
- rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
- rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-
- util_min = max(rq_util_min, p_util_min);
- util_max = max(rq_util_max, p_util_max);
- }
+ if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+ /*
+ * Open code uclamp_rq_util_with() except for
+ * the clamp() part. Ie: apply max aggregation
+ * only. util_fits_cpu() logic requires to
+ * operate on non clamped util but must use the
+ * max-aggregated uclamp_{min, max}.
+ */
+ rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+ rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+
+ util_min = max(rq_util_min, p_util_min);
+ util_max = max(rq_util_max, p_util_max);
}
if (!util_fits_cpu(util, util_min, util_max, cpu))
continue;
* * Thermal pressure will impact all cpus in this perf domain
* equally.
*/
- if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+ if (sched_energy_enabled()) {
unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
- struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+ struct perf_domain *pd;
+ rcu_read_lock();
+
+ pd = rcu_dereference(rq->rd->pd);
rq->cpu_capacity_inverted = 0;
for (; pd; pd = pd->next) {
struct cpumask *pd_span = perf_domain_span(pd);
unsigned long pd_cap_orig, pd_cap;
+ /* We can't be inverted against our own pd */
+ if (cpumask_test_cpu(cpu_of(rq), pd_span))
+ continue;
+
cpu = cpumask_any(pd_span);
pd_cap_orig = arch_scale_cpu_capacity(cpu);
break;
}
}
+
+ rcu_read_unlock();
}
trace_sched_cpu_capacity_tp(rq);
if (resource >= RLIM_NLIMITS)
return -EINVAL;
+ resource = array_index_nospec(resource, RLIM_NLIMITS);
+
if (new_rlim) {
if (new_rlim->rlim_cur > new_rlim->rlim_max)
return -EINVAL;
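This is the standard Spectre-v1 hardening pattern, and the same shape recurs in the nlattr and fib metrics hunks below: a user-controlled index is bounds-checked and then clamped with array_index_nospec() so that a mispredicted bounds check cannot be used to speculatively read out of bounds. A self-contained sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/nospec.h>

#define TABLE_SIZE 16
static int table[TABLE_SIZE];

static int table_lookup(unsigned int idx)
{
	if (idx >= TABLE_SIZE)
		return -EINVAL;
	/*
	 * The branch above can be mispredicted; clamp the index so a
	 * speculative out-of-bounds load cannot leak table contents.
	 */
	idx = array_index_nospec(idx, TABLE_SIZE);
	return table[idx];
}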
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
if (type <= 0 || type > maxtype)
return 0;
+ type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
}
continue;
}
+ type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
validate, extack, depth);
return -EOPNOTSUPP;
if (sgt_append->prv) {
+ unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+ sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
if (WARN_ON(offset))
return -EINVAL;
/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
- last_pg = sg_page(sgt_append->prv);
- while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
- if (sgt_append->prv->length + PAGE_SIZE > max_segment)
- break;
- sgt_append->prv->length += PAGE_SIZE;
- last_pg = pages[0];
- pages++;
- n_pages--;
+ if (page_to_pfn(pages[0]) == next_pfn) {
+ last_pg = pfn_to_page(next_pfn - 1);
+ while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+ break;
+ sgt_append->prv->length += PAGE_SIZE;
+ last_pg = pages[0];
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
}
- if (!n_pages)
- goto out;
}
/* compute number of contiguous chunks */
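A worked instance of the pre-check above, with illustrative numbers: if the previous entry maps the page at physical 0x10000 with offset 0x200 and length 0x1e00, it ends at physical 0x12000, so merging is only attempted when the first new page is exactly pfn 0x12, i.e. physically adjacent to the previous entry's tail.

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assuming 4 KiB pages */
	unsigned long phys = 0x10000, offset = 0x200, length = 0x1e00;
	unsigned long next_pfn = (phys + offset + length) / page_size;

	/* merging requires page_to_pfn(pages[0]) == next_pfn */
	printf("next_pfn = %#lx\n", next_pfn);	/* prints 0x12 */
	return 0;
}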
return 0;
if (ops->id && ops->size) {
-cleanup:
ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&pernet_ops_rwsem));
ng->ptr[*ops->id] = NULL;
}
+cleanup:
kfree(data);
out:
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/hash.h>
+#include <linux/nospec.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
if (type > RTAX_MAX)
return false;
+ type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
bool ecn_ca = false;
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/netlink.h>
+#include <linux/nospec.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/ip.h>
return -EINVAL;
}
+ type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
- hdr->hop_limit--;
+ /* It's tempting to decrease the hop limit
+ * here by 1, as we do at the end of the
+ * function too.
+ *
+ * But that would be incorrect, as proxying is
+ * not forwarding. The ip6_input function
+ * will handle this packet locally, and it
+ * depends on the hop limit being unchanged.
+ *
+			 * One example is the NDP hop limit, which
+			 * always has to stay 255; another would be
+ * similar checks around RA packets, where the
+ * user can even change the desired limit.
+ */
return ip6_input(skb);
} else if (proxied < 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
static void mctp_sk_close(struct sock *sk, long timeout)
{
- struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
-
- del_timer_sync(&msk->key_expiry);
sk_common_release(sk);
}
spin_lock_irqsave(&key->lock, fl2);
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
}
+ sock_set_flag(sk, SOCK_DEAD);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+ /* Since there are no more tag allocations (we have removed all of the
+	 * keys), stop any pending expiry events. The timer cannot be re-queued
+	 * as the sk is no longer observable.
+ */
+ del_timer_sync(&msk->key_expiry);
}
static struct proto mctp_proto = {
key->valid = true;
spin_lock_init(&key->lock);
refcount_set(&key->refs, 1);
+ sock_hold(key->sk);
return key;
}
mctp_dev_release_key(key->dev, key);
spin_unlock_irqrestore(&key->lock, flags);
+ sock_put(key->sk);
kfree(key);
}
spin_lock_irqsave(&net->mctp.keys_lock, flags);
+ if (sock_flag(&msk->sk, SOCK_DEAD)) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
key->tag)) {
hlist_add_head(&key->sklist, &msk->keys);
}
+out_unlock:
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
return rc;
static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
{
+ struct mctp_sk_key *key, *any_key = NULL;
struct net *net = dev_net(skb->dev);
- struct mctp_sk_key *key;
struct mctp_sock *msk;
struct mctp_hdr *mh;
unsigned long f;
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
*/
- key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
- if (key) {
- msk = container_of(key->sk,
+ any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+ if (any_key) {
+ msk = container_of(any_key->sk,
struct mctp_sock, sk);
- spin_unlock_irqrestore(&key->lock, f);
- mctp_key_unref(key);
- key = NULL;
+ spin_unlock_irqrestore(&any_key->lock, f);
}
}
* this function.
*/
rc = mctp_key_add(key, msk);
- if (rc) {
- kfree(key);
- } else {
+ if (!rc)
trace_mctp_key_acquire(key);
- /* we don't need to release key->lock on exit */
- mctp_key_unref(key);
- }
+ /* we don't need to release key->lock on exit, so
+ * clean up here and suppress the unlock via
+ * setting to NULL
+ */
+ mctp_key_unref(key);
key = NULL;
} else {
spin_unlock_irqrestore(&key->lock, f);
mctp_key_unref(key);
}
+ if (any_key)
+ mctp_key_unref(any_key);
out:
if (rc)
kfree_skb(skb);
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
- closely. They're more complex. --RR
-
- And so for me for SCTP :D -Kiran */
-
static const char *const sctp_conntrack_names[] = {
- "NONE",
- "CLOSED",
- "COOKIE_WAIT",
- "COOKIE_ECHOED",
- "ESTABLISHED",
- "SHUTDOWN_SENT",
- "SHUTDOWN_RECD",
- "SHUTDOWN_ACK_SENT",
- "HEARTBEAT_SENT",
- "HEARTBEAT_ACKED",
+ [SCTP_CONNTRACK_NONE] = "NONE",
+ [SCTP_CONNTRACK_CLOSED] = "CLOSED",
+ [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
+ [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
};
#define SECS * HZ
[SCTP_CONNTRACK_CLOSED] = 10 SECS,
[SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
[SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
- [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
+ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
[SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
[SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
- [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
- [SCTP_CONNTRACK_DATA_SENT] = 30 SECS,
};
#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
#define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
-#define sDS SCTP_CONNTRACK_DATA_SENT
#define sIV SCTP_CONNTRACK_MAX
/*
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK/DATA/SACK in the direction
- opposite to that of the HEARTBEAT/DATA chunk. Secondary connection
- is established.
-DATA_SENT - We have seen a DATA/SACK in a new flow.
*/
/* TODO
*/
/* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][12][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA, sCW},
-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},
-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS, sCL},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA, sSA},
-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't have Stale cookie*/
-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA, sCL},
-/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* data/sack */ {sDS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
+/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
},
{
/* REPLY */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL, sIV},
-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR, sIV},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA, sIV},
-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* Can't come in reply dir */
-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA, sIV},
-/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sHA},
-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
-/* data/sack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
+/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
+/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
+/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
}
};
#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
- (offset) < (skb)->len && \
- ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
+ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
+ (sch)->length; \
(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
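Worth spelling out why the termination condition changed: the per-chunk stride is (ntohs(sch->length) + 3) & ~3, which is zero for a malformed zero-length chunk, so the old offset < skb->len guard alone made no forward progress and could spin on the same chunk forever. Bailing out on a zero length terminates the walk instead. The stride arithmetic in isolation:

/* per-chunk stride: wire length rounded up to a 4-byte boundary */
static unsigned int chunk_stride(unsigned int wire_len)
{
	return (wire_len + 3) & ~3U;	/* chunk_stride(0) == 0: no progress */
}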
/* Some validity checks to make sure the chunks are fine */
case SCTP_CID_HEARTBEAT_ACK:
i = 10;
break;
- case SCTP_CID_DATA:
- case SCTP_CID_SACK:
- i = 11;
- break;
default:
/* Other chunks like DATA or SACK do not change the state */
pr_debug("Unknown chunk type %d, Will stay in %s\n",
ih->init_tag);
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_DATA ||
- sch->type == SCTP_CID_SACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
pr_debug("Setting vtag %x for secondary conntrack\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
if (!sctp_new(ct, skb, sh, dataoff))
return -NF_ACCEPT;
- } else {
- /* Check the verification tag (Sec 8.5) */
- if (!test_bit(SCTP_CID_INIT, map) &&
- !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
- !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
- !test_bit(SCTP_CID_ABORT, map) &&
- !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
- !test_bit(SCTP_CID_HEARTBEAT, map) &&
- !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
- sh->vtag != ct->proto.sctp.vtag[dir]) {
- nf_ct_l4proto_log_invalid(skb, ct, state,
- "verification tag check failed %x vs %x for dir %d",
- sh->vtag, ct->proto.sctp.vtag[dir], dir);
- goto out;
- }
+ }
+
+ /* Check the verification tag (Sec 8.5) */
+ if (!test_bit(SCTP_CID_INIT, map) &&
+ !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
+ !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
+ !test_bit(SCTP_CID_ABORT, map) &&
+ !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
+ sh->vtag != ct->proto.sctp.vtag[dir]) {
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "verification tag check failed %x vs %x for dir %d",
+ sh->vtag, ct->proto.sctp.vtag[dir], dir);
+ goto out;
}
old_state = new_state = SCTP_CONNTRACK_NONE;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Special cases of Verification tag check (Sec 8.5.1) */
if (sch->type == SCTP_CID_INIT) {
- /* Sec 8.5.1 (A) */
+ /* (A) vtag MUST be zero */
if (sh->vtag != 0)
goto out_unlock;
} else if (sch->type == SCTP_CID_ABORT) {
- /* Sec 8.5.1 (B) */
- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
- sh->vtag != ct->proto.sctp.vtag[!dir])
+ /* (B) vtag MUST match own vtag if T flag is unset OR
+ * MUST match peer's vtag if T flag is set
+ */
+ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[dir]) ||
+ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
- /* Sec 8.5.1 (C) */
- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
- sh->vtag != ct->proto.sctp.vtag[!dir] &&
- sch->flags & SCTP_CHUNK_FLAG_T)
+ /* (C) vtag MUST match own vtag if T flag is unset OR
+ * MUST match peer's vtag if T flag is set
+ */
+ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[dir]) ||
+ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
- /* Sec 8.5.1 (D) */
+ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
- } else if (sch->type == SCTP_CID_DATA || sch->type == SCTP_CID_SACK) {
- if (ct->proto.sctp.vtag[dir] == 0) {
- pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir);
- ct->proto.sctp.vtag[dir] = sh->vtag;
- }
}
old_state = ct->proto.sctp.state;
}
ct->proto.sctp.state = new_state;
- if (old_state != new_state)
+ if (old_state != new_state) {
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+ if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+ !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
+ }
}
spin_unlock_bh(&ct->lock);
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
- if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
- dir == IP_CT_DIR_REPLY &&
- new_state == SCTP_CONNTRACK_ESTABLISHED) {
- set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_ASSURED, ct);
- }
-
return NF_ACCEPT;
out_unlock:
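
The test_and_set_bit() call makes the IPCT_ASSURED event one-shot regardless of which direction first reaches ESTABLISHED. The idiom, sketched with C11 atomics standing in for the kernel bitops:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_ulong status;
        #define ASSURED_BIT 0

        int main(void)
        {
                /* fetch-or returns the old word, so only the first caller
                 * to set the bit reports the event.
                 */
                unsigned long old = atomic_fetch_or(&status, 1UL << ASSURED_BIT);

                if (!(old & (1UL << ASSURED_BIT)))
                        printf("emit IPCT_ASSURED exactly once\n");
                return 0;
        }
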
[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
- [CTA_TIMEOUT_SCTP_DATA_SENT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT,
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT] = {
- .procname = "nf_conntrack_sctp_timeout_data_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
XASSIGN(SHUTDOWN_RECD, sn);
XASSIGN(SHUTDOWN_ACK_SENT, sn);
XASSIGN(HEARTBEAT_SENT, sn);
- XASSIGN(HEARTBEAT_ACKED, sn);
- XASSIGN(DATA_SENT, sn);
#undef XASSIGN
#endif
}
return !nft_rbtree_interval_end(rbe);
}
-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
- const struct nft_rbtree_elem *interval)
+static int nft_rbtree_cmp(const struct nft_set *set,
+ const struct nft_rbtree_elem *e1,
+ const struct nft_rbtree_elem *e2)
{
- return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+ return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
+ set->klen);
}
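
nft_rbtree_cmp() keeps memcmp() semantics (negative/zero/positive), which is why callers below can write !nft_rbtree_cmp(...) for key equality. A quick demonstration of the convention:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                unsigned char a[4] = { 0, 0, 0, 1 };
                unsigned char b[4] = { 0, 0, 0, 2 };

                printf("a before b: %d\n", memcmp(a, b, sizeof(a)) < 0); /* 1 */
                printf("a equals a: %d\n", !memcmp(a, a, sizeof(a)));    /* 1 */
                return 0;
        }
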
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
const struct nft_rbtree_elem *rbe, *interval = NULL;
u8 genmask = nft_genmask_cur(net);
const struct rb_node *parent;
- const void *this;
int d;
parent = rcu_dereference_raw(priv->root.rb_node);
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- this = nft_set_ext_key(&rbe->ext);
- d = memcmp(this, key, set->klen);
+ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
if (interval &&
- nft_rbtree_equal(set, this, interval) &&
+ !nft_rbtree_cmp(set, rbe, interval) &&
nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(interval))
continue;
return rbe;
}
+static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe)
+{
+ struct nft_set *set = (struct nft_set *)__set;
+ struct rb_node *prev = rb_prev(&rbe->node);
+ struct nft_rbtree_elem *rbe_prev;
+ struct nft_set_gc_batch *gcb;
+
+ /* search for the expired end interval coming before this element;
+ * bail out if the walk hits the leftmost node without finding one,
+ * instead of dereferencing a bogus rbe_prev.
+ */
+ while (prev != NULL) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (nft_rbtree_interval_end(rbe_prev))
+ break;
+
+ prev = rb_prev(prev);
+ }
+
+ if (prev == NULL)
+ return -ENOENT;
+
+ gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+ if (!gcb)
+ return -ENOMEM;
+
+ rb_erase(&rbe_prev->node, &priv->root);
+ rb_erase(&rbe->node, &priv->root);
+ atomic_sub(2, &set->nelems);
+
+ nft_set_gc_batch_add(gcb, rbe);
+ nft_set_gc_batch_complete(gcb);
+
+ return 0;
+}
+
+static bool nft_rbtree_update_first(const struct nft_set *set,
+ struct nft_rbtree_elem *rbe,
+ struct rb_node *first)
+{
+ struct nft_rbtree_elem *first_elem;
+
+ first_elem = rb_entry(first, struct nft_rbtree_elem, node);
+ /* this element is closest to where the new element is to be inserted:
+ * make it the first element of the ordered walk below.
+ */
+ if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
+ return true;
+
+ return false;
+}
+
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
- bool overlap = false, dup_end_left = false, dup_end_right = false;
+ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+ struct rb_node *node, *parent, **p, *first = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
- struct nft_rbtree_elem *rbe;
- struct rb_node *parent, **p;
- int d;
+ int d, err;
- /* Detect overlaps as we descend the tree. Set the flag in these cases:
- *
- * a1. _ _ __>| ?_ _ __| (insert end before existing end)
- * a2. _ _ ___| ?_ _ _>| (insert end after existing end)
- * a3. _ _ ___? >|_ _ __| (insert start before existing end)
- *
- * and clear it later on, as we eventually reach the points indicated by
- * '?' above, in the cases described below. We'll always meet these
- * later, locally, due to tree ordering, and overlaps for the intervals
- * that are the closest together are always evaluated last.
- *
- * b1. _ _ __>| !_ _ __| (insert end before existing start)
- * b2. _ _ ___| !_ _ _>| (insert end after existing start)
- * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
- * '--' no nodes falling in this range
- * b4. >|_ _ ! (insert start before existing start)
- *
- * Case a3. resolves to b3.:
- * - if the inserted start element is the leftmost, because the '0'
- * element in the tree serves as end element
- * - otherwise, if an existing end is found immediately to the left. If
- * there are existing nodes in between, we need to further descend the
- * tree before we can conclude the new start isn't causing an overlap
- *
- * or to b4., which, preceded by a3., means we already traversed one or
- * more existing intervals entirely, from the right.
- *
- * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
- * in that order.
- *
- * The flag is also cleared in two special cases:
- *
- * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
- * b6. |__ _ >|!__ _ _ (insert end right after existing start)
- *
- * which always happen as last step and imply that no further
- * overlapping is possible.
- *
- * Another special case comes from the fact that start elements matching
- * an already existing start element are allowed: insertion is not
- * performed but we return -EEXIST in that case, and the error will be
- * cleared by the caller if NLM_F_EXCL is not present in the request.
- * This way, request for insertion of an exact overlap isn't reported as
- * error to userspace if not desired.
- *
- * However, if the existing start matches a pre-existing start, but the
- * end element doesn't match the corresponding pre-existing end element,
- * we need to report a partial overlap. This is a local condition that
- * can be noticed without need for a tracking flag, by checking for a
- * local duplicated end for a corresponding start, from left and right,
- * separately.
+ /* Descend the tree to find an existing element whose key is greater
+ * than the new element's. That element is the starting point for the
+ * ordered walk below that detects possible overlaps.
+ */
-
parent = NULL;
p = &priv->root.rb_node;
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = memcmp(nft_set_ext_key(&rbe->ext),
- nft_set_ext_key(&new->ext),
- set->klen);
+ d = nft_rbtree_cmp(set, rbe, new);
+
if (d < 0) {
p = &parent->rb_left;
-
- if (nft_rbtree_interval_start(new)) {
- if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext) && !*p)
- overlap = false;
- } else {
- if (dup_end_left && !*p)
- return -ENOTEMPTY;
-
- overlap = nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext,
- genmask) &&
- !nft_set_elem_expired(&rbe->ext);
-
- if (overlap) {
- dup_end_right = true;
- continue;
- }
- }
} else if (d > 0) {
- p = &parent->rb_right;
+ if (!first ||
+ nft_rbtree_update_first(set, rbe, first))
+ first = &rbe->node;
- if (nft_rbtree_interval_end(new)) {
- if (dup_end_right && !*p)
- return -ENOTEMPTY;
-
- overlap = nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext,
- genmask) &&
- !nft_set_elem_expired(&rbe->ext);
-
- if (overlap) {
- dup_end_left = true;
- continue;
- }
- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext)) {
- overlap = nft_rbtree_interval_end(rbe);
- }
+ p = &parent->rb_right;
} else {
- if (nft_rbtree_interval_end(rbe) &&
- nft_rbtree_interval_start(new)) {
+ if (nft_rbtree_interval_end(rbe))
p = &parent->rb_left;
-
- if (nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
- overlap = false;
- } else if (nft_rbtree_interval_start(rbe) &&
- nft_rbtree_interval_end(new)) {
+ else
p = &parent->rb_right;
+ }
+ }
+
+ if (!first)
+ first = rb_first(&priv->root);
+
+ /* Detect overlap by going through the list of valid tree nodes.
+ * Values stored in the tree are in reversed order, starting from
+ * highest to lowest value.
+ */
+ for (node = first; node != NULL; node = rb_next(node)) {
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
- if (nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
- overlap = false;
- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext)) {
- *ext = &rbe->ext;
- return -EEXIST;
- } else {
- overlap = false;
- if (nft_rbtree_interval_end(rbe))
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
+ if (!nft_set_elem_active(&rbe->ext, genmask))
+ continue;
+
+ /* perform garbage collection to avoid bogus overlap reports. */
+ if (nft_set_elem_expired(&rbe->ext)) {
+ err = nft_rbtree_gc_elem(set, priv, rbe);
+ if (err < 0)
+ return err;
+
+ continue;
+ }
+
+ d = nft_rbtree_cmp(set, rbe, new);
+ if (d == 0) {
+ /* Matching end element: no need to look for an
+ * overlapping greater or equal element.
+ */
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe_le = rbe;
+ break;
+ }
+
+ /* first element that is greater than or equal to the key value. */
+ if (!rbe_ge) {
+ rbe_ge = rbe;
+ continue;
+ }
+
+ /* this is a closer greater-or-equal element; update it. */
+ if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
+ rbe_ge = rbe;
+ continue;
+ }
+
+ /* element is equal to the key value; make sure the flags
+ * match, so an existing greater-or-equal start element is
+ * not replaced by a greater-or-equal end element.
+ */
+ if ((nft_rbtree_interval_start(new) &&
+ nft_rbtree_interval_start(rbe_ge)) ||
+ (nft_rbtree_interval_end(new) &&
+ nft_rbtree_interval_end(rbe_ge))) {
+ rbe_ge = rbe;
+ continue;
}
+ } else if (d > 0) {
+ /* annotate element greater than the new element. */
+ rbe_ge = rbe;
+ continue;
+ } else if (d < 0) {
+ /* annotate element less than the new element. */
+ rbe_le = rbe;
+ break;
}
+ }
- dup_end_left = dup_end_right = false;
+ /* - new start element matching existing start element: full overlap
+ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+ */
+ if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
+ nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
+ *ext = &rbe_ge->ext;
+ return -EEXIST;
}
- if (overlap)
+ /* - new end element matching existing end element: full overlap
+ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+ */
+ if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
+ nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
+ *ext = &rbe_le->ext;
+ return -EEXIST;
+ }
+
+ /* - new start element with existing closest, less or equal key value
+ * being a start element: partial overlap, reported as -ENOTEMPTY.
+ * Anonymous sets allow for two consecutive start elements since they
+ * are constant; skip them to avoid bogus overlap reports.
+ */
+ if (!nft_set_is_anonymous(set) && rbe_le &&
+ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
+ return -ENOTEMPTY;
+
+ /* - new end element with existing closest, less or equal key value
+ * being an end element: partial overlap, reported as -ENOTEMPTY.
+ */
+ if (rbe_le &&
+ nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
return -ENOTEMPTY;
+ /* - new end element with existing closest, greater or equal key value
+ * being an end element: partial overlap, reported as -ENOTEMPTY
+ */
+ if (rbe_ge &&
+ nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
+ /* Accepted element: pick insertion point depending on key value */
+ parent = NULL;
+ p = &priv->root.rb_node;
+ while (*p != NULL) {
+ parent = *p;
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+ d = nft_rbtree_cmp(set, rbe, new);
+
+ if (d < 0)
+ p = &parent->rb_left;
+ else if (d > 0)
+ p = &parent->rb_right;
+ else if (nft_rbtree_interval_end(rbe))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
rb_link_node_rcu(&new->node, parent, p);
rb_insert_color(&new->node, &priv->root);
return 0;
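
The whole routine revolves around the set's interval encoding: every interval contributes two elements keyed by its boundaries, the closing one carrying an end flag. A self-contained sketch of that representation and a point-membership test, illustrative only:

        #include <stdbool.h>
        #include <stdio.h>

        struct elem {
                unsigned int key;
                bool is_end;            /* end-of-interval marker */
        };

        int main(void)
        {
                /* interval [10, 20) as two stored elements */
                struct elem start = { 10, false }, end = { 20, true };
                unsigned int point = 15;

                printf("in interval: %d\n",
                       point >= start.key && point < end.key); /* 1 */
                return 0;
        }
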
struct nft_rbtree *priv;
struct rb_node *node;
struct nft_set *set;
+ struct net *net;
+ u8 genmask;
priv = container_of(work, struct nft_rbtree, gc_work.work);
set = nft_set_container_of(priv);
+ net = read_pnet(&set->net);
+ genmask = nft_genmask_cur(net);
write_lock_bh(&priv->lock);
write_seqcount_begin(&priv->count);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ if (!nft_set_elem_active(&rbe->ext, genmask))
+ continue;
+
+ /* elements are reversed in the rbtree for historical reasons,
+ * from highest to lowest value; that is why the end element is
+ * always visited before the start element.
+ */
if (nft_rbtree_interval_end(rbe)) {
rbe_end = rbe;
continue;
}
if (!nft_set_elem_expired(&rbe->ext))
continue;
- if (nft_set_elem_mark_busy(&rbe->ext))
+
+ if (nft_set_elem_mark_busy(&rbe->ext)) {
+ rbe_end = NULL;
continue;
+ }
if (rbe_prev) {
rb_erase(&rbe_prev->node, &priv->root);
if (nlk_sk(sk)->bound)
goto err;
- nlk_sk(sk)->portid = portid;
+ /* portid can be read locklessly from netlink_getname(). */
+ WRITE_ONCE(nlk_sk(sk)->portid, portid);
+
sock_hold(sk);
err = __netlink_insert(table, sk);
return -EINVAL;
if (addr->sa_family == AF_UNSPEC) {
- sk->sk_state = NETLINK_UNCONNECTED;
- nlk->dst_portid = 0;
- nlk->dst_group = 0;
+ /* paired with READ_ONCE() in netlink_getsockbyportid() */
+ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, 0);
+ WRITE_ONCE(nlk->dst_group, 0);
return 0;
}
if (addr->sa_family != AF_NETLINK)
err = netlink_autobind(sock);
if (err == 0) {
- sk->sk_state = NETLINK_CONNECTED;
- nlk->dst_portid = nladdr->nl_pid;
- nlk->dst_group = ffs(nladdr->nl_groups);
+ /* paired with READ_ONCE() in netlink_getsockbyportid() */
+ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
}
return err;
nladdr->nl_pad = 0;
if (peer) {
- nladdr->nl_pid = nlk->dst_portid;
- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+ /* Paired with WRITE_ONCE() in netlink_connect() */
+ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
} else {
- nladdr->nl_pid = nlk->portid;
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+ nladdr->nl_pid = READ_ONCE(nlk->portid);
netlink_lock_table();
nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
netlink_unlock_table();
/* Don't bother queuing skb if kernel socket has no input function */
nlk = nlk_sk(sock);
- if (sock->sk_state == NETLINK_CONNECTED &&
- nlk->dst_portid != nlk_sk(ssk)->portid) {
+ /* dst_portid and sk_state can be changed in netlink_connect() */
+ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
sock_put(sock);
return ERR_PTR(-ECONNREFUSED);
}
goto out;
netlink_skb_flags |= NETLINK_SKB_DST;
} else {
- dst_portid = nlk->dst_portid;
- dst_group = nlk->dst_group;
+ /* Paired with WRITE_ONCE() in netlink_connect() */
+ dst_portid = READ_ONCE(nlk->dst_portid);
+ dst_group = READ_ONCE(nlk->dst_group);
}
/* Paired with WRITE_ONCE() in netlink_insert() */
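
All of these conversions follow one pattern: writers still run under the socket lock, while lockless readers only need a tear-free load of the same word. A sketch of the pairing, with C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE():

        #include <stdatomic.h>
        #include <stdio.h>

        static _Atomic unsigned int dst_portid;

        /* writer side: runs locked, publishes with a single plain store */
        static void set_portid(unsigned int portid)
        {
                atomic_store_explicit(&dst_portid, portid, memory_order_relaxed);
        }

        /* reader side: lockless, relies only on the load being untorn */
        static unsigned int get_portid(void)
        {
                return atomic_load_explicit(&dst_portid, memory_order_relaxed);
        }

        int main(void)
        {
                set_portid(42);
                printf("%u\n", get_portid());
                return 0;
        }
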
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
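
The added sock_hold() pins the socket across the unlocked window so nr_destroy_socket() cannot race with the final sock_put(). The generic hold-before-unlock shape of the fix, sketched with a plain refcount (obj_hold/obj_put are illustrative):

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj {
                atomic_int refcnt;      /* starts at 1 for the owner */
        };

        static void obj_hold(struct obj *o)
        {
                atomic_fetch_add(&o->refcnt, 1);
        }

        static void obj_put(struct obj *o)
        {
                /* last reference frees the object */
                if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                        free(o);
        }

        /* pattern: obj_hold(o); unlock(o); teardown(o); obj_put(o); */
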
int i;
hrtimer_cancel(&q->advance_timer);
- qdisc_synchronize(sch);
if (q->qdiscs) {
for (i = 0; i < dev->num_tx_queues; i++)
}
}
+ /* If somehow no addresses were found that can be used with this
+ * scope, it's an error.
+ */
+ if (list_empty(&dest->address_list))
+ error = -ENETUNREACH;
+
out:
if (error)
sctp_bind_addr_clean(dest);
int rc = -EOPNOTSUPP;
lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ rc = -EINVAL;
+ release_sock(sk);
+ return rc;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
macro_rules! print_macro (
// The non-continuation cases (most of them, e.g. `INFO`).
($format_string:path, false, $($arg:tt)+) => (
- // SAFETY: This hidden macro should only be called by the documented
- // printing macros which ensure the format string is one of the fixed
- // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
- // by the `module!` proc macro or fixed values defined in a kernel
- // crate.
- unsafe {
- $crate::print::call_printk(
- &$format_string,
- crate::__LOG_PREFIX,
- format_args!($($arg)+),
- );
+ // To remain sound, `arg`s must be expanded outside the `unsafe` block.
+ // Typically one would use a `let` binding for that; however, `format_args!`
+ // takes borrows on the arguments, but does not extend the scope of temporaries.
+ // Therefore, a `match` expression is used to keep them around, since
+ // the scrutinee is kept until the end of the `match`.
+ match format_args!($($arg)+) {
+ // SAFETY: This hidden macro should only be called by the documented
+ // printing macros which ensure the format string is one of the fixed
+ // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+ // by the `module!` proc macro or fixed values defined in a kernel
+ // crate.
+ args => unsafe {
+ $crate::print::call_printk(
+ &$format_string,
+ crate::__LOG_PREFIX,
+ args,
+ );
+ }
}
);
# If the MAKEFLAGS variable contains multiple instances of the
# --jobserver-auth= option, the last one is relevant.
fds = opts[-1].split("=", 1)[1]
- reader, writer = [int(x) for x in fds.split(",", 1)]
- # Open a private copy of reader to avoid setting nonblocking
- # on an unexpecting process with the same reader fd.
- reader = os.open("/proc/self/fd/%d" % (reader),
- os.O_RDONLY | os.O_NONBLOCK)
+
+ # Starting with GNU Make 4.4, named pipes are used for reader and writer.
+ # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
+ _, _, path = fds.partition('fifo:')
+
+ if path:
+ reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+ writer = os.open(path, os.O_WRONLY)
+ else:
+ reader, writer = [int(x) for x in fds.split(",", 1)]
+ # Open a private copy of reader to avoid setting nonblocking
+ # on an unexpecting process with the same reader fd.
+ reader = os.open("/proc/self/fd/%d" % (reader),
+ os.O_RDONLY | os.O_NONBLOCK)
# Read out as many jobserver slots as possible.
while True:
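
The jobserver protocol itself is language-agnostic: with the fifo style above, a client opens the named pipe, reads one byte per extra job slot, and writes each byte back when finished. A hedged C sketch of a client claiming and returning a single slot (MAKEFLAGS parsing is deliberately simplified):

        #include <fcntl.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                const char *flags = getenv("MAKEFLAGS");
                const char *auth = flags ? strstr(flags, "fifo:") : NULL;
                char path[256], token;
                int fd;

                if (!auth || sscanf(auth + 5, "%255s", path) != 1)
                        return 1;       /* no fifo-style jobserver advertised */

                fd = open(path, O_RDWR);
                if (fd < 0)
                        return 1;

                if (read(fd, &token, 1) == 1) { /* claim one extra job slot */
                        /* ... run one parallel task here ... */
                        write(fd, &token, 1);   /* return the slot */
                }
                close(fd);
                return 0;
        }
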
# SPDX-License-Identifier: GPL-2.0-only
/conf
/[gmnq]conf
+/[gmnq]conf-bin
/[gmnq]conf-cflags
/[gmnq]conf-libs
-/qconf-bin
/qconf-moc.cc
$(obj)/gconf.o: | $(obj)/gconf-cflags
# check if necessary packages are available, and configure build flags
-cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin)
+cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin); touch $(obj)/$*conf-bin
$(obj)/%conf-cflags $(obj)/%conf-libs $(obj)/%conf-bin: $(src)/%conf-cfg.sh
$(call cmd,conf_cfg)
#!/bin/sh
#
# Output a simple RPM spec file.
-# This version assumes a minimum of RPM 4.0.3.
+# This version assumes a minimum of RPM 4.13.
#
# The only gothic bit here is redefining install_post to avoid
# stripping the symbols from files in the kernel which we want
while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
switch (opt) {
case 'p':
- reclaim_period_ms = atoi_non_negative("Reclaim period", optarg);
+ reclaim_period_ms = atoi_positive("Reclaim period", optarg);
break;
case 't':
token = atoi_paranoid(optarg);
int main(int argc, char *argv[])
{
struct timespec min_ts, max_ts, vm_ts;
+ struct kvm_xen_hvm_attr evt_reset;
struct kvm_vm *vm;
pthread_t thread;
bool verbose;
}
done:
- struct kvm_xen_hvm_attr evt_reset = {
- .type = KVM_XEN_ATTR_TYPE_EVTCHN,
- .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
- };
+ evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+ evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
alarm(0);
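
The hoist sidesteps a C language rule: before C23, a label must be followed by a statement, and a declaration is not a statement, so initializing the struct right after the done: label fails to compile. A minimal sketch of the valid form (f is hypothetical):

        int f(int x)
        {
                int y;          /* declared up front, like evt_reset above;
                                 * "done: int y = x;" would be rejected
                                 * before C23 */

                if (x)
                        goto done;
                x = 1;
        done:
                y = x;
                return y;
        }
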
ifeq ($(CROSS_COMPILE),)
ifeq ($(CLANG_TARGET_FLAGS),)
-$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
else
CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS)
endif # CLANG_TARGET_FLAGS
return -ENXIO;
}
-static void kvm_vfio_destroy(struct kvm_device *dev)
+static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_group *kvg, *tmp;
kvm_vfio_update_coherency(dev);
kfree(kv);
- kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+ kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
}
static int kvm_vfio_create(struct kvm_device *dev, u32 type);
static struct kvm_device_ops kvm_vfio_ops = {
.name = "kvm-vfio",
.create = kvm_vfio_create,
- .destroy = kvm_vfio_destroy,
+ .release = kvm_vfio_release,
.set_attr = kvm_vfio_set_attr,
.has_attr = kvm_vfio_has_attr,
};