Merge branch 'x86-rep-insns': x86 user copy clarifications
author  Linus Torvalds <torvalds@linux-foundation.org>
Mon, 24 Apr 2023 17:39:27 +0000 (10:39 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Mon, 24 Apr 2023 17:39:27 +0000 (10:39 -0700)
Merge my x86 user copy updates branch.

This cleans up a lot of our x86 memory copy code, particularly for user
accesses.  I've been pushing for microarchitectural support for good
memory copying and clearing for a long while, and it's been visible in
how the kernel has aggressively used 'rep movs' and 'rep stos' whenever
possible.

And that micro-architectural support has been improving over the years,
to the point where on modern CPUs the best option for a memory copy
that would become a function call (as opposed to being something that
can just be turned into individual 'mov' instructions) is now to inline
the string instruction sequence instead.

However, that only makes sense when we have the modern markers for this:
the x86 FSRM and FSRS capabilities ("Fast Short REP MOVS/STOS").
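
To make that concrete (this is only an illustrative sketch, not the
actual implementation from this series, and the helper name is made up
for the example), "inlining the string instruction sequence" boils down
to emitting a bare 'rep movsb' with the count in RCX and the pointers in
RSI/RDI, instead of calling out to an unrolled copy routine:

    #include <stddef.h>

    /* Sketch only: an inlined 'rep movsb' copy.  dst/src/len live in
     * RDI/RSI/RCX, which is what the instruction consumes and updates. */
    static inline void *rep_movsb_copy(void *dst, const void *src, size_t len)
    {
            void *ret = dst;

            asm volatile("rep movsb"
                         : "+D" (dst), "+S" (src), "+c" (len)
                         : : "memory");
            return ret;
    }

On FSRM/FSRS hardware that single instruction is expected to be
competitive with (or better than) a call to an out-of-line copy loop
even for short lengths, which is what makes inlining it attractive.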

So this cleans up a lot of our historical code, gets rid of the legacy
marker use ("REP_GOOD" and "ERMS") from the memcpy/memset cases, and
replaces it with that modern reality.  Note that REP_GOOD and ERMS end
up still being used by the known large cases (ie page copying and
clearing).

The reason much of this ends up being about user memory accesses is that
the normal in-kernel cases are done by the compiler (__builtin_memcpy()
and __builtin_memset()) and getting to the point where we can use our
instruction rewriting to inline those to be string instructions will
need some compiler support.
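
As a purely illustrative example of that split (none of this code is
from the series itself): with a small constant size the compiler already
turns __builtin_memcpy() into a couple of plain moves, while a
runtime-sized copy becomes an out-of-line call, and only that
out-of-line function can currently be patched by the kernel:

    #include <stddef.h>

    struct pair { long a, b; };

    /* Constant size: compiles to a couple of moves, no call at all. */
    static inline void copy_pair(struct pair *dst, const struct pair *src)
    {
            __builtin_memcpy(dst, src, sizeof(*dst));
    }

    /*
     * Runtime size: the compiler emits 'call memcpy'.  Turning this into
     * an inlined 'rep movsb' would need compiler cooperation, which is
     * why the series concentrates on the user accessors instead.
     */
    static inline void copy_var(void *dst, const void *src, size_t len)
    {
            __builtin_memcpy(dst, src, len);
    }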

In contrast, the user accessor functions are all entirely controlled by
the kernel code, so we can change those arbitrarily.

Thanks to Borislav Petkov for feedback on the series, and to Jens for
testing some of this on micro-architectures I didn't personally have
access to.

* x86-rep-insns:
  x86: rewrite '__copy_user_nocache' function
  x86: remove 'zerorest' argument from __copy_user_nocache()
  x86: set FSRS automatically on AMD CPUs that have FSRM
  x86: improve on the non-rep 'copy_user' function
  x86: improve on the non-rep 'clear_user' function
  x86: inline the 'rep movs' in user copies for the FSRM case
  x86: move stac/clac from user copy routines into callers
  x86: don't use REP_GOOD or ERMS for user memory clearing
  x86: don't use REP_GOOD or ERMS for user memory copies
  x86: don't use REP_GOOD or ERMS for small memory clearing
  x86: don't use REP_GOOD or ERMS for small memory copies
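
One practical consequence of the "move stac/clac from user copy routines
into callers" change above, shown here only as a sketch reusing the
hypothetical helper from earlier rather than the actual kernel code:
once the copy itself can be a single inlined instruction, the SMAP
toggling ends up bracketing it at the call site instead of living inside
a separate copy function, roughly:

    stac();                         /* open the user-access window */
    rep_movsb_copy(dst, src, len);
    clac();                         /* close the window again */

where stac()/clac() are the existing x86 SMAP helpers.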

197 files changed:
.mailmap
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/networking/devlink/ice.rst
Documentation/rust/arch-support.rst
MAINTAINERS
Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/imx6ull-colibri.dtsi
arch/arm/boot/dts/imx7d-remarkable2.dts
arch/arm/boot/dts/rk3288.dtsi
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/include/asm/assembler.h
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
arch/arm64/boot/dts/qcom/sm8250-mtp.dts
arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts
arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi
arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts
arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi
arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
arch/arm64/boot/dts/rockchip/rk3588s.dtsi
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hypercalls.c
arch/loongarch/Kconfig
arch/loongarch/include/asm/acpi.h
arch/loongarch/include/asm/addrspace.h
arch/loongarch/include/asm/bootinfo.h
arch/loongarch/include/asm/cpu-features.h
arch/loongarch/include/asm/cpu.h
arch/loongarch/include/asm/io.h
arch/loongarch/include/asm/loongarch.h
arch/loongarch/include/asm/module.lds.h
arch/loongarch/include/uapi/asm/ptrace.h
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/proc.c
arch/loongarch/kernel/ptrace.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/stacktrace.c
arch/loongarch/kernel/unwind.c
arch/loongarch/kernel/unwind_prologue.c
arch/loongarch/mm/init.c
arch/loongarch/power/suspend_asm.S
arch/mips/kernel/vmlinux.lds.S
arch/riscv/boot/dts/canaan/k210.dtsi
arch/s390/net/bpf_jit_comp.c
arch/x86/include/asm/alternative.h
block/blk-map.c
block/blk-mq.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/utglobal.c
drivers/firmware/psci/psci.c
drivers/fpga/dfl-pci.c
drivers/fpga/fpga-bridge.c
drivers/fpga/intel-m10-bmc-sec-update.c
drivers/fpga/xilinx-pr-decoupler.c
drivers/gpio/gpio-104-dio-48e.c
drivers/gpio/gpio-104-idi-48.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/i915/display/intel_dp_aux.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/dac/ad5755.c
drivers/iio/light/tsl2772.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/cyttsp5.c
drivers/memstick/core/memstick.c
drivers/mmc/host/sdhci_am654.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx_common.c
drivers/net/hamradio/Kconfig
drivers/net/tun.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/pci/msi/msi.c
drivers/pci/of.c
drivers/pci/pci.h
drivers/pci/probe.c
drivers/perf/amlogic/meson_g12_ddr_pmu.c
drivers/regulator/fan53555.c
drivers/regulator/sm5703-regulator.c
drivers/spi/spi-rockchip-sfc.c
drivers/tee/optee/call.c
drivers/tee/tee_shm.c
drivers/vhost/scsi.c
fs/btrfs/discard.c
fs/btrfs/file.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/dfs.h
fs/cifs/file.c
fs/cifs/smb2pdu.c
fs/fs-writeback.c
fs/fuse/file.c
fs/nilfs2/segment.c
fs/read_write.c
fs/userfaultfd.c
include/acpi/actypes.h
include/linux/kmsan.h
include/linux/mlx5/driver.h
include/linux/skbuff.h
include/linux/uio.h
include/net/netfilter/nf_tables.h
init/Kconfig
io_uring/net.c
io_uring/rw.c
kernel/bpf/verifier.c
kernel/fork.c
kernel/sys.c
lib/iov_iter.c
lib/maple_tree.c
mm/backing-dev.c
mm/huge_memory.c
mm/khugepaged.c
mm/kmsan/hooks.c
mm/kmsan/shadow.c
mm/madvise.c
mm/mempolicy.c
mm/mmap.c
mm/mprotect.c
mm/page_alloc.c
mm/swap.c
mm/vmalloc.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_switchdev.c
net/ipv6/rpl.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_lookup.c
net/sched/cls_api.c
net/sched/sch_qfq.c
net/sunrpc/auth_gss/gss_krb5_test.c
rust/Makefile
rust/kernel/print.rs
rust/kernel/str.rs
scripts/Makefile.package
scripts/asn1_compiler.c
scripts/cc-version.sh
scripts/generate_rust_analyzer.py
scripts/is_rust_module.sh
scripts/package/mkdebian
sound/core/pcm_native.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98373.c
sound/soc/fsl/fsl_asrc_dma.c
sound/soc/fsl/fsl_sai.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/pm.c
tools/Makefile
tools/arch/loongarch/include/uapi/asm/bitsperlong.h
tools/mm/page_owner_sort.c

index e424863..6686879 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -232,6 +232,8 @@ Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Crispin <john@phrozen.org> <blogic@openwrt.org>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
+<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
+<jon.toppins+linux@gmail.com> <jtoppins@redhat.com>
 Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -297,6 +299,8 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
+Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
+Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
 Matthew Wilcox <willy@infradead.org> <mawilcox@linuxonhyperv.com>
index 19600c5..6ae5f12 100644 (file)
@@ -128,6 +128,7 @@ parameter is applicable::
        KVM     Kernel Virtual Machine support is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
+       LOONGARCH LoongArch architecture is enabled.
        LOOP    Loopback device support is enabled.
        M68k    M68k architecture is enabled.
                        These options have more detailed description inside of
index 6221a1d..7016cb1 100644 (file)
                        When enabled, memory and cache locality will be
                        impacted.
 
+       writecombine=   [LOONGARCH] Control the MAT (Memory Access Type) of
+                       ioremap_wc().
+
+                       on   - Enable writecombine, use WUC for ioremap_wc()
+                       off  - Disable writecombine, use SUC for ioremap_wc()
+
        x2apic_phys     [X86-64,APIC] Use x2apic physical mode instead of
                        default x2apic cluster mode on platforms
                        supporting x2apic.
index 10f282c..2f60e34 100644 (file)
@@ -7,6 +7,21 @@ ice devlink support
 This document describes the devlink features implemented by the ``ice``
 device driver.
 
+Parameters
+==========
+
+.. list-table:: Generic parameters implemented
+
+   * - Name
+     - Mode
+     - Notes
+   * - ``enable_roce``
+     - runtime
+     - mutually exclusive with ``enable_iwarp``
+   * - ``enable_iwarp``
+     - runtime
+     - mutually exclusive with ``enable_roce``
+
 Info versions
 =============
 
index ed7f4f5..b91e9ef 100644 (file)
@@ -15,7 +15,7 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
 ============  ================  ==============================================
 Architecture  Level of support  Constraints
 ============  ================  ==============================================
-``x86``       Maintained        ``x86_64`` only.
 ``um``        Maintained        ``x86_64`` only.
+``x86``       Maintained        ``x86_64`` only.
 ============  ================  ==============================================
 
index 0e64787..c6545eb 100644 (file)
@@ -14594,6 +14594,7 @@ F:      net/netlabel/
 
 NETWORKING [MPTCP]
 M:     Matthieu Baerts <matthieu.baerts@tessares.net>
+M:     Mat Martineau <martineau@kernel.org>
 L:     netdev@vger.kernel.org
 L:     mptcp@lists.linux.dev
 S:     Maintained
index b5c48e3..f5543ee 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 2ef651a..726ecab 100644 (file)
@@ -107,7 +107,7 @@ ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+KBSS_SZ = $(shell echo $$(($$($(NM) vmlinux | \
                sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
                       -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
index bf64ba8..fde8a19 100644 (file)
                self-powered;
                type = "micro";
 
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-                               usb_dr_connector: endpoint {
-                                       remote-endpoint = <&usb1_drd_sw>;
-                               };
+               port {
+                       usb_dr_connector: endpoint {
+                               remote-endpoint = <&usb1_drd_sw>;
                        };
                };
        };
index 8b2f11e..427f8d0 100644 (file)
                reg = <0x62>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_epdpmic>;
-               #address-cells = <1>;
-               #size-cells = <0>;
                #thermal-sensor-cells = <0>;
                epd-pwr-good-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
 
index 2ca76b6..511ca86 100644 (file)
                status = "disabled";
        };
 
-       spdif: sound@ff88b0000 {
+       spdif: sound@ff8b0000 {
                compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
                reg = <0x0 0xff8b0000 0x0 0x10000>;
                #sound-dai-cells = <0>;
index 6dc6fed..8d002c6 100644 (file)
@@ -76,7 +76,7 @@ CONFIG_RFKILL=y
 CONFIG_RFKILL_INPUT=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
-CONFIG_PCI_IMX6=y
+CONFIG_PCI_IMX6_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
index 06b48ce..505a306 100644 (file)
@@ -244,19 +244,6 @@ THUMB(     fpreg   .req    r7      )
        .endm
 #endif
 
-       .macro  local_bh_disable, ti, tmp
-       ldr     \tmp, [\ti, #TI_PREEMPT]
-       add     \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
-       str     \tmp, [\ti, #TI_PREEMPT]
-       .endm
-
-       .macro  local_bh_enable_ti, ti, tmp
-       get_thread_info \ti
-       ldr     \tmp, [\ti, #TI_PREEMPT]
-       sub     \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
-       str     \tmp, [\ti, #TI_PREEMPT]
-       .endm
-
 #define USERL(l, x...)                         \
 9999:  x;                                      \
        .pushsection __ex_table,"a";            \
index 9a89264..7483ef8 100644 (file)
 @  IRQs enabled.
 @
 ENTRY(do_vfp)
-       local_bh_disable r10, r4
-       ldr     r4, .LCvfp
-       ldr     r11, [r10, #TI_CPU]     @ CPU number
-       add     r10, r10, #TI_VFPSTATE  @ r10 = workspace
-       ldr     pc, [r4]                @ call VFP entry point
+       mov     r1, r10
+       mov     r3, r9
+       b       vfp_entry
 ENDPROC(do_vfp)
-
-ENTRY(vfp_null_entry)
-       local_bh_enable_ti r10, r4
-       ret     lr
-ENDPROC(vfp_null_entry)
-
-       .align  2
-.LCvfp:
-       .word   vfp_vector
index 26c4f61..4d84782 100644 (file)
@@ -6,9 +6,9 @@
  *  Written by Deep Blue Solutions Limited.
  *
  * This code is called from the kernel's undefined instruction trap.
- * r9 holds the return address for successful handling.
+ * r1 holds the thread_info pointer
+ * r3 holds the return address for successful handling.
  * lr holds the return address for unrecognised instructions.
- * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
 #include <linux/init.h>
 @ VFP hardware support entry point.
 @
 @  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r1  = thread_info pointer
 @  r2  = PC value to resume execution after successful emulation
-@  r9  = normal "successful" return address
-@  r10 = vfp_state union
-@  r11 = CPU number
+@  r3  = normal "successful" return address
 @  lr  = unrecognised instruction return address
 @  IRQs enabled.
 ENTRY(vfp_support_entry)
+       ldr     r11, [r1, #TI_CPU]      @ CPU number
+       add     r10, r1, #TI_VFPSTATE   @ r10 = workspace
+
        DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
 
        .fpu    vfpv2
@@ -85,9 +87,9 @@ ENTRY(vfp_support_entry)
        bne     look_for_VFP_exceptions @ VFP is already enabled
 
        DBGSTR1 "enable %x", r10
-       ldr     r3, vfp_current_hw_state_address
+       ldr     r9, vfp_current_hw_state_address
        orr     r1, r1, #FPEXC_EN       @ user FPEXC has the enable bit set
-       ldr     r4, [r3, r11, lsl #2]   @ vfp_current_hw_state pointer
+       ldr     r4, [r9, r11, lsl #2]   @ vfp_current_hw_state pointer
        bic     r5, r1, #FPEXC_EX       @ make sure exceptions are disabled
        cmp     r4, r10                 @ this thread owns the hw context?
 #ifndef CONFIG_SMP
@@ -146,7 +148,7 @@ vfp_reload_hw:
 #endif
 
        DBGSTR1 "load state %p", r10
-       str     r10, [r3, r11, lsl #2]  @ update the vfp_current_hw_state pointer
+       str     r10, [r9, r11, lsl #2]  @ update the vfp_current_hw_state pointer
                                        @ Load the saved state back into the VFP
        VFPFLDMIA r10, r5               @ reload the working registers while
                                        @ FPEXC is in a safe state
@@ -175,9 +177,12 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-       local_bh_enable_ti r10, r4
-       ret     r9                      @ we think we have handled things
 
+       mov     lr, r3                  @ we think we have handled things
+local_bh_enable_and_ret:
+       adr     r0, .
+       mov     r1, #SOFTIRQ_DISABLE_OFFSET
+       b       __local_bh_enable_ip    @ tail call
 
 look_for_VFP_exceptions:
        @ Check for synchronous or asynchronous exception
@@ -200,13 +205,12 @@ skip:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-       local_bh_enable_ti r10, r4
-       ret     lr
+       b       local_bh_enable_and_ret
 
 process_exception:
        DBGSTR  "bounce"
        mov     r2, sp                  @ nothing stacked - regdump is at TOS
-       mov     lr, r9                  @ setup for a return to the user code.
+       mov     lr, r3                  @ setup for a return to the user code.
 
        @ Now call the C code to package up the bounce to the support code
        @   r0 holds the trigger instruction
index 01bc48d..349dcb9 100644 (file)
 /*
  * Our undef handlers (in entry.S)
  */
-asmlinkage void vfp_support_entry(void);
-asmlinkage void vfp_null_entry(void);
+asmlinkage void vfp_support_entry(u32, void *, u32, u32);
 
-asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
+static bool have_vfp __ro_after_init;
 
 /*
  * Dual-use variable.
@@ -645,6 +644,25 @@ static int vfp_starting_cpu(unsigned int unused)
        return 0;
 }
 
+/*
+ * Entered with:
+ *
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r1  = thread_info pointer
+ *  r2  = PC value to resume execution after successful emulation
+ *  r3  = normal "successful" return address
+ *  lr  = unrecognised instruction return address
+ */
+asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
+                         u32 resume_return_address)
+{
+       if (unlikely(!have_vfp))
+               return;
+
+       local_bh_disable();
+       vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
+}
+
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
@@ -798,7 +816,6 @@ static int __init vfp_init(void)
        vfpsid = fmrx(FPSID);
        barrier();
        unregister_undef_hook(&vfp_detect_hook);
-       vfp_vector = vfp_null_entry;
 
        pr_info("VFP support v0.3: ");
        if (VFP_arch) {
@@ -883,7 +900,7 @@ static int __init vfp_init(void)
                                  "arm/vfp:starting", vfp_starting_cpu,
                                  vfp_dying_cpu);
 
-       vfp_vector = vfp_support_entry;
+       have_vfp = true;
 
        thread_register_notifier(&vfp_notifier_block);
        vfp_pm_init();
index 123a56f..feb27a0 100644 (file)
 
                        dmc: bus@38000 {
                                compatible = "simple-bus";
-                               reg = <0x0 0x38000 0x0 0x400>;
                                #address-cells = <2>;
                                #size-cells = <2>;
-                               ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+                               ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
 
                                canvas: video-lut@48 {
                                        compatible = "amlogic,canvas";
                                        reg = <0x0 0x48 0x0 0x14>;
                                };
+
+                               pmu: pmu@80 {
+                                       reg = <0x0 0x80 0x0 0x40>,
+                                             <0x0 0xc00 0x0 0x40>;
+                                       interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
+                               };
                        };
 
                        usb2_phy1: phy@3a000 {
                        };
                };
 
-               pmu: pmu@ff638000 {
-                       reg = <0x0 0xff638000 0x0 0x100>,
-                             <0x0 0xff638c00 0x0 0x100>;
-                       interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
-               };
-
                aobus: bus@ff800000 {
                        compatible = "simple-bus";
                        reg = <0x0 0xff800000 0x0 0x100000>;
index d1a6390..3f9dfd4 100644 (file)
                rohm,reset-snvs-powered;
 
                #clock-cells = <0>;
-               clocks = <&osc_32k 0>;
+               clocks = <&osc_32k>;
                clock-output-names = "clk-32k-out";
 
                regulators {
index 88321b5..6f08115 100644 (file)
@@ -99,7 +99,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_eth>;
                regulator-always-on;
                enable-active-high;
                /* Verdin SD_1_PWR_EN (SODIMM 76) */
                gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
-               off-on-delay = <100000>;
+               off-on-delay-us = <100000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
                regulator-max-microvolt = <3300000>;
index 361426c..c296225 100644 (file)
@@ -10,7 +10,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                regulator-max-microvolt = <3300000>;
                regulator-min-microvolt = <3300000>;
                regulator-name = "+V3.3_ETH";
index 0dd6180..1608775 100644 (file)
@@ -87,7 +87,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_eth>;
                regulator-always-on;
                enable-active-high;
                /* Verdin SD_1_PWR_EN (SODIMM 76) */
                gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
-               off-on-delay = <100000>;
+               off-on-delay-us = <100000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
                regulator-max-microvolt = <3300000>;
index 2dd60e3..a237275 100644 (file)
 
                        lcdif2: display-controller@32e90000 {
                                compatible = "fsl,imx8mp-lcdif";
-                               reg = <0x32e90000 0x238>;
+                               reg = <0x32e90000 0x10000>;
                                interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT>,
                                         <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
index ca3f966..5cf07ca 100644 (file)
        perst-gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
        status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
        status = "okay";
 };
 
index 651a231..1b8379b 100644 (file)
        perst-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
        status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
        status = "okay";
 };
 
index aa0a7bd..dd92433 100644 (file)
        left_spkr: speaker@0,3 {
                compatible = "sdw10217211000";
                reg = <0 3>;
-               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrLeft";
                #sound-dai-cells = <0>;
        right_spkr: speaker@0,4 {
                compatible = "sdw10217211000";
                reg = <0 4>;
-               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrRight";
                #sound-dai-cells = <0>;
index b613781..313083e 100644 (file)
@@ -464,7 +464,7 @@ ap_i2c_tpm: &i2c14 {
 
 &mdss_dp_out {
        data-lanes = <0 1>;
-       link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+       link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000>;
 };
 
 &mdss_mdp {
index df7d28f..be446eb 100644 (file)
@@ -59,8 +59,9 @@
                #size-cells = <0>;
 
                pmk8280_pon: pon@1300 {
-                       compatible = "qcom,pm8998-pon";
-                       reg = <0x1300>;
+                       compatible = "qcom,pmk8350-pon";
+                       reg = <0x1300>, <0x800>;
+                       reg-names = "hlos", "pbs";
 
                        pmk8280_pon_pwrkey: pwrkey {
                                compatible = "qcom,pmk8350-pwrkey";
index 67d2a66..5c688cb 100644 (file)
                left_spkr: speaker@0,3 {
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: speaker@0,4 {
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index 9850140..41f59e3 100644 (file)
                left_spkr: speaker@0,3 {
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: speaker@0,4 {
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index e54cdc8..4c9de23 100644 (file)
        left_spkr: speaker@0,3 {
                compatible = "sdw10217211000";
                reg = <0 3>;
-               powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrLeft";
                #sound-dai-cells = <0>;
        right_spkr: speaker@0,4 {
                compatible = "sdw10217211000";
                reg = <0 4>;
-               powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrRight";
                #sound-dai-cells = <0>;
index 61b3168..ce318e0 100644 (file)
@@ -24,6 +24,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &pwm0 {
index 04eba43..80fc53c 100644 (file)
        internal_display: panel@0 {
                reg = <0>;
                backlight = <&backlight>;
-               iovcc-supply = <&vcc_lcd>;
                reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
                rotation = <270>;
-               vdd-supply = <&vcc_lcd>;
 
                port {
                        mipi_in_panel: endpoint {
index 139c898..d94ac81 100644 (file)
@@ -83,6 +83,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &rk817 {
index 4702183..aa6f5b1 100644 (file)
@@ -59,6 +59,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &rk817_charger {
index 083452c..e47d139 100644 (file)
@@ -61,7 +61,6 @@
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
                pwms = <&pwm0 0 1000000 PWM_POLARITY_INVERTED>;
-               pwm-delay-us = <10000>;
        };
 
        emmc_pwrseq: emmc-pwrseq {
index ee6095b..5c1929d 100644 (file)
                power-supply = <&pp3300_disp>;
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
-               pwm-delay-us = <10000>;
        };
 
        gpio_keys: gpio-keys {
index a47d9f7..c5e7de6 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
                pwms = <&pwm1 0 1000000 0>;
-               pwm-delay-us = <10000>;
        };
 
        dmic: dmic {
index 194e48c..ddd45de 100644 (file)
                pinctrl-0 = <&panel_en_pin>;
                power-supply = <&vcc3v3_panel>;
 
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               panel_in_edp: endpoint@0 {
-                                       reg = <0>;
-                                       remote-endpoint = <&edp_out_panel>;
-                               };
+               port {
+                       panel_in_edp: endpoint {
+                               remote-endpoint = <&edp_out_panel>;
                        };
                };
        };
        disable-wp;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v0_sd>;
        vqmmc-supply = <&vcc_sdio>;
        status = "okay";
index 7815752..bca2b50 100644 (file)
                avdd-supply = <&avdd>;
                backlight = <&backlight>;
                dvdd-supply = <&vcc3v3_s0>;
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
 
-                       port@0 {
-                               reg = <0>;
-
-                               mipi_in_panel: endpoint {
-                                       remote-endpoint = <&mipi_out_panel>;
-                               };
+               port {
+                       mipi_in_panel: endpoint {
+                               remote-endpoint = <&mipi_out_panel>;
                        };
                };
        };
index 1881b4b..40e7c4a 100644 (file)
                      <0x0 0xfff10000 0 0x10000>, /* GICH */
                      <0x0 0xfff20000 0 0x10000>; /* GICV */
                interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
-               its: interrupt-controller@fee20000 {
+               its: msi-controller@fee20000 {
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
index 65a80d1..9a0e217 100644 (file)
 };
 
 &cru {
-       assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-       assigned-clock-rates = <1200000000>, <200000000>, <241500000>;
+       assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+                         <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+       assigned-clock-rates = <32768>, <1200000000>,
+                              <200000000>, <241500000>;
 };
 
 &gpio_keys_control {
index b4b2df8..c763c7f 100644 (file)
 };
 
 &cru {
-       assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-       assigned-clock-rates = <1200000000>, <200000000>, <500000000>;
+       assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+                         <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+       assigned-clock-rates = <32768>, <1200000000>,
+                              <200000000>, <500000000>;
 };
 
 &dsi_dphy0 {
index ce7165d..102e448 100644 (file)
        non-removable;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v3_sys>;
        vqmmc-supply = <&vcc_1v8>;
        status = "okay";
index 005cde6..a506948 100644 (file)
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <3145728>;
                        cache-line-size = <64>;
                        cache-sets = <4096>;
+                       cache-level = <3>;
                };
        };
 
index bcd774d..3dd691c 100644 (file)
@@ -576,9 +576,22 @@ struct kvm_vcpu_arch {
        ({                                                      \
                __build_check_flag(v, flagset, f, m);           \
                                                                \
-               v->arch.flagset & (m);                          \
+               READ_ONCE(v->arch.flagset) & (m);               \
        })
 
+/*
+ * Note that the set/clear accessors must be preempt-safe in order to
+ * avoid nesting them with load/put which also manipulate flags...
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+/* the nVHE hypervisor is always non-preemptible */
+#define __vcpu_flags_preempt_disable()
+#define __vcpu_flags_preempt_enable()
+#else
+#define __vcpu_flags_preempt_disable() preempt_disable()
+#define __vcpu_flags_preempt_enable()  preempt_enable()
+#endif
+
 #define __vcpu_set_flag(v, flagset, f, m)                      \
        do {                                                    \
                typeof(v->arch.flagset) *fset;                  \
@@ -586,9 +599,11 @@ struct kvm_vcpu_arch {
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
+               __vcpu_flags_preempt_disable();                 \
                if (HWEIGHT(m) > 1)                             \
                        *fset &= ~(m);                          \
                *fset |= (f);                                   \
+               __vcpu_flags_preempt_enable();                  \
        } while (0)
 
 #define __vcpu_clear_flag(v, flagset, f, m)                    \
@@ -598,7 +613,9 @@ struct kvm_vcpu_arch {
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
+               __vcpu_flags_preempt_disable();                 \
                *fset &= ~(m);                                  \
+               __vcpu_flags_preempt_enable();                  \
        } while (0)
 
 #define vcpu_get_flag(v, ...)  __vcpu_get_flag((v), __VA_ARGS__)
index 5da884e..c4b4678 100644 (file)
@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
        int wa_level;
 
+       if (KVM_REG_SIZE(reg->id) != sizeof(val))
+               return -ENOENT;
        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
index 7fd5125..3ddde33 100644 (file)
@@ -447,6 +447,22 @@ config ARCH_IOREMAP
          protection support. However, you can enable LoongArch DMW-based
          ioremap() for better performance.
 
+config ARCH_WRITECOMBINE
+       bool "Enable WriteCombine (WUC) for ioremap()"
+       help
+         LoongArch maintains cache coherency in hardware, but when paired
+         with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which
+         is similar to WriteCombine) is out of the scope of cache coherency
+         machanism for PCIe devices (this is a PCIe protocol violation, which
+         may be fixed in newer chipsets).
+
+         This means WUC can only used for write-only memory regions now, so
+         this option is disabled by default, making WUC silently fallback to
+         SUC for ioremap(). You can enable this option if the kernel is ensured
+         to run on hardware without this bug.
+
+         You can override this setting via writecombine=on/off boot parameter.
+
 config ARCH_STRICT_ALIGN
        bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
        default y
index 4198753..976a810 100644 (file)
@@ -41,8 +41,11 @@ extern void loongarch_suspend_enter(void);
 
 static inline unsigned long acpi_get_wakeup_address(void)
 {
+#ifdef CONFIG_SUSPEND
        extern void loongarch_wakeup_start(void);
        return (unsigned long)loongarch_wakeup_start;
+#endif
+       return 0UL;
 }
 
 #endif /* _ASM_LOONGARCH_ACPI_H */
index 8fb699b..5c9c03b 100644 (file)
@@ -71,9 +71,9 @@ extern unsigned long vm_map_base;
 #define _ATYPE32_      int
 #define _ATYPE64_      __s64
 #ifdef CONFIG_64BIT
-#define _CONST64_(x)   x ## L
+#define _CONST64_(x)   x ## UL
 #else
-#define _CONST64_(x)   x ## LL
+#define _CONST64_(x)   x ## ULL
 #endif
 #endif
 
index 0051b52..c607968 100644 (file)
@@ -13,7 +13,6 @@ const char *get_system_type(void);
 extern void init_environ(void);
 extern void memblock_init(void);
 extern void platform_init(void);
-extern void plat_swiotlb_setup(void);
 extern int __init init_numa_memory(void);
 
 struct loongson_board_info {
index b079742..f6177f1 100644 (file)
@@ -42,6 +42,7 @@
 #define cpu_has_fpu            cpu_opt(LOONGARCH_CPU_FPU)
 #define cpu_has_lsx            cpu_opt(LOONGARCH_CPU_LSX)
 #define cpu_has_lasx           cpu_opt(LOONGARCH_CPU_LASX)
+#define cpu_has_crc32          cpu_opt(LOONGARCH_CPU_CRC32)
 #define cpu_has_complex                cpu_opt(LOONGARCH_CPU_COMPLEX)
 #define cpu_has_crypto         cpu_opt(LOONGARCH_CPU_CRYPTO)
 #define cpu_has_lvz            cpu_opt(LOONGARCH_CPU_LVZ)
index c3da917..88773d8 100644 (file)
@@ -78,25 +78,26 @@ enum cpu_type_enum {
 #define CPU_FEATURE_FPU                        3       /* CPU has FPU */
 #define CPU_FEATURE_LSX                        4       /* CPU has LSX (128-bit SIMD) */
 #define CPU_FEATURE_LASX               5       /* CPU has LASX (256-bit SIMD) */
-#define CPU_FEATURE_COMPLEX            6       /* CPU has Complex instructions */
-#define CPU_FEATURE_CRYPTO             7       /* CPU has Crypto instructions */
-#define CPU_FEATURE_LVZ                        8       /* CPU has Virtualization extension */
-#define CPU_FEATURE_LBT_X86            9       /* CPU has X86 Binary Translation */
-#define CPU_FEATURE_LBT_ARM            10      /* CPU has ARM Binary Translation */
-#define CPU_FEATURE_LBT_MIPS           11      /* CPU has MIPS Binary Translation */
-#define CPU_FEATURE_TLB                        12      /* CPU has TLB */
-#define CPU_FEATURE_CSR                        13      /* CPU has CSR */
-#define CPU_FEATURE_WATCH              14      /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT               15      /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI             16      /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI             17      /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH           18      /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP                        19      /* CPU has perfermance counter */
-#define CPU_FEATURE_SCALEFREQ          20      /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE           21      /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE          22      /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID            23      /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR         24      /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_CRC32              6       /* CPU has CRC32 instructions */
+#define CPU_FEATURE_COMPLEX            7       /* CPU has Complex instructions */
+#define CPU_FEATURE_CRYPTO             8       /* CPU has Crypto instructions */
+#define CPU_FEATURE_LVZ                        9       /* CPU has Virtualization extension */
+#define CPU_FEATURE_LBT_X86            10      /* CPU has X86 Binary Translation */
+#define CPU_FEATURE_LBT_ARM            11      /* CPU has ARM Binary Translation */
+#define CPU_FEATURE_LBT_MIPS           12      /* CPU has MIPS Binary Translation */
+#define CPU_FEATURE_TLB                        13      /* CPU has TLB */
+#define CPU_FEATURE_CSR                        14      /* CPU has CSR */
+#define CPU_FEATURE_WATCH              15      /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT               16      /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI             17      /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI             18      /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH           19      /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP                        20      /* CPU has perfermance counter */
+#define CPU_FEATURE_SCALEFREQ          21      /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE           22      /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE          23      /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID            24      /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR         25      /* CPU has hypervisor (running in VM) */
 
 #define LOONGARCH_CPU_CPUCFG           BIT_ULL(CPU_FEATURE_CPUCFG)
 #define LOONGARCH_CPU_LAM              BIT_ULL(CPU_FEATURE_LAM)
@@ -104,6 +105,7 @@ enum cpu_type_enum {
 #define LOONGARCH_CPU_FPU              BIT_ULL(CPU_FEATURE_FPU)
 #define LOONGARCH_CPU_LSX              BIT_ULL(CPU_FEATURE_LSX)
 #define LOONGARCH_CPU_LASX             BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_CRC32            BIT_ULL(CPU_FEATURE_CRC32)
 #define LOONGARCH_CPU_COMPLEX          BIT_ULL(CPU_FEATURE_COMPLEX)
 #define LOONGARCH_CPU_CRYPTO           BIT_ULL(CPU_FEATURE_CRYPTO)
 #define LOONGARCH_CPU_LVZ              BIT_ULL(CPU_FEATURE_LVZ)
index 402a7d9..545e270 100644 (file)
@@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
  * @offset:    bus address of the memory
  * @size:      size of the resource to map
  */
+extern pgprot_t pgprot_wc;
+
 #define ioremap_wc(offset, size)       \
-       ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))
+       ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
 
 #define ioremap_cache(offset, size)    \
        ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
index 65b7dcd..83da5d2 100644 (file)
@@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg)
 #define  CPUCFG1_EP                    BIT(22)
 #define  CPUCFG1_RPLV                  BIT(23)
 #define  CPUCFG1_HUGEPG                        BIT(24)
-#define  CPUCFG1_IOCSRBRD              BIT(25)
+#define  CPUCFG1_CRC32                 BIT(25)
 #define  CPUCFG1_MSGINT                        BIT(26)
 
 #define LOONGARCH_CPUCFG2              0x2
@@ -423,9 +423,9 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
 #define  CSR_ASID_ASID_WIDTH           10
 #define  CSR_ASID_ASID                 (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
 
-#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when VA[47] = 0 */
+#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when VA[VALEN-1] = 0 */
 
-#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when VA[47] = 1 */
+#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when VA[VALEN-1] = 1 */
 
 #define LOONGARCH_CSR_PGD              0x1b    /* Page table base */
 
index 438f09d..88554f9 100644 (file)
@@ -2,8 +2,8 @@
 /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
 SECTIONS {
        . = ALIGN(4);
-       .got : { BYTE(0) }
-       .plt : { BYTE(0) }
-       .plt.idx : { BYTE(0) }
-       .ftrace_trampoline : { BYTE(0) }
+       .got : { BYTE(0) }
+       .plt : { BYTE(0) }
+       .plt.idx : { BYTE(0) }
+       .ftrace_trampoline : { BYTE(0) }
 }
index cc48ed2..82d811b 100644 (file)
@@ -47,11 +47,12 @@ struct user_fp_state {
 };
 
 struct user_watch_state {
-       uint16_t dbg_info;
+       uint64_t dbg_info;
        struct {
                uint64_t    addr;
                uint64_t    mask;
                uint32_t    ctrl;
+               uint32_t    pad;
        } dbg_regs[8];
 };
 
index 3a3fce2..5adf0f7 100644 (file)
@@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat)
 
 /* MAP BASE */
 unsigned long vm_map_base;
-EXPORT_SYMBOL_GPL(vm_map_base);
+EXPORT_SYMBOL(vm_map_base);
 
 static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
 {
@@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
        c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
                     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
 
-       elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
+       elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
 
        config = read_cpucfg(LOONGARCH_CPUCFG1);
        if (config & CPUCFG1_UAL) {
                c->options |= LOONGARCH_CPU_UAL;
                elf_hwcap |= HWCAP_LOONGARCH_UAL;
        }
+       if (config & CPUCFG1_CRC32) {
+               c->options |= LOONGARCH_CPU_CRC32;
+               elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+       }
+
 
        config = read_cpucfg(LOONGARCH_CPUCFG2);
        if (config & CPUCFG2_LAM) {
index 5c67cc4..0d82907 100644 (file)
@@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_fpu)        seq_printf(m, " fpu");
        if (cpu_has_lsx)        seq_printf(m, " lsx");
        if (cpu_has_lasx)       seq_printf(m, " lasx");
+       if (cpu_has_crc32)      seq_printf(m, " crc32");
        if (cpu_has_complex)    seq_printf(m, " complex");
        if (cpu_has_crypto)     seq_printf(m, " crypto");
        if (cpu_has_lvz)        seq_printf(m, " lvz");
index 06bceae..5fcffb4 100644 (file)
@@ -391,10 +391,10 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
        return 0;
 }
 
-static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info)
+static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
 {
        u8 num;
-       u16 reg = 0;
+       u64 reg = 0;
 
        switch (note_type) {
        case NT_LOONGARCH_HW_BREAK:
@@ -524,15 +524,16 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
        return modify_user_hw_breakpoint(bp, &attr);
 }
 
-#define PTRACE_HBP_CTRL_SZ     sizeof(u32)
 #define PTRACE_HBP_ADDR_SZ     sizeof(u64)
 #define PTRACE_HBP_MASK_SZ     sizeof(u64)
+#define PTRACE_HBP_CTRL_SZ     sizeof(u32)
+#define PTRACE_HBP_PAD_SZ      sizeof(u32)
 
 static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
 {
-       u16 info;
+       u64 info;
        u32 ctrl;
        u64 addr, mask;
        int ret, idx = 0;
@@ -545,7 +546,7 @@ static int hw_break_get(struct task_struct *target,
 
        membuf_write(&to, &info, sizeof(info));
 
-       /* (address, ctrl) registers */
+       /* (address, mask, ctrl) registers */
        while (to.left) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
@@ -562,6 +563,7 @@ static int hw_break_get(struct task_struct *target,
                membuf_store(&to, addr);
                membuf_store(&to, mask);
                membuf_store(&to, ctrl);
+               membuf_zero(&to, sizeof(u32));
                idx++;
        }
 
@@ -582,7 +584,7 @@ static int hw_break_set(struct task_struct *target,
        offset = offsetof(struct user_watch_state, dbg_regs);
        user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
 
-       /* (address, ctrl) registers */
+       /* (address, mask, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                if (count < PTRACE_HBP_ADDR_SZ)
@@ -602,7 +604,7 @@ static int hw_break_set(struct task_struct *target,
                        break;
 
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
-                                        offset, offset + PTRACE_HBP_ADDR_SZ);
+                                        offset, offset + PTRACE_HBP_MASK_SZ);
                if (ret)
                        return ret;
 
@@ -611,8 +613,8 @@ static int hw_break_set(struct task_struct *target,
                        return ret;
                offset += PTRACE_HBP_MASK_SZ;
 
-               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
-                                        offset, offset + PTRACE_HBP_MASK_SZ);
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+                                        offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
 
@@ -620,6 +622,11 @@ static int hw_break_set(struct task_struct *target,
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
+
+               user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                         offset, offset + PTRACE_HBP_PAD_SZ);
+               offset += PTRACE_HBP_PAD_SZ;
+
                idx++;
        }
 
index bae84cc..4444b13 100644 (file)
@@ -160,6 +160,27 @@ static void __init smbios_parse(void)
        dmi_walk(find_tokens, NULL);
 }
 
+#ifdef CONFIG_ARCH_WRITECOMBINE
+pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
+#else
+pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
+#endif
+
+EXPORT_SYMBOL(pgprot_wc);
+
+static int __init setup_writecombine(char *p)
+{
+       if (!strcmp(p, "on"))
+               pgprot_wc = PAGE_KERNEL_WUC;
+       else if (!strcmp(p, "off"))
+               pgprot_wc = PAGE_KERNEL_SUC;
+       else
+               pr_warn("Unknown writecombine setting \"%s\".\n", p);
+
+       return 0;
+}
+early_param("writecombine", setup_writecombine);
+
 static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
@@ -368,8 +389,8 @@ static void __init arch_mem_init(char **cmdline_p)
        /*
         * In order to reduce the possibility of kernel panic when failed to
         * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
-        * low memory as small as possible before plat_swiotlb_setup(), so
-        * make sparse_init() using top-down allocation.
+        * low memory as small as possible before swiotlb_init(), so make
+        * sparse_init() using top-down allocation.
         */
        memblock_set_bottom_up(false);
        sparse_init();
index 3a690f9..2463d2f 100644 (file)
@@ -30,7 +30,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 
        regs->regs[1] = 0;
        for (unwind_start(&state, task, regs);
-             !unwind_done(&state); unwind_next_frame(&state)) {
+            !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
index a463d69..ba324ba 100644 (file)
@@ -28,5 +28,6 @@ bool default_next_frame(struct unwind_state *state)
 
        } while (!get_stack_info(state->sp, state->task, info));
 
+       state->error = true;
        return false;
 }
index 9095fde..55afc27 100644 (file)
@@ -211,7 +211,7 @@ static bool next_frame(struct unwind_state *state)
                        pc = regs->csr_era;
 
                        if (user_mode(regs) || !__kernel_text_address(pc))
-                               return false;
+                               goto out;
 
                        state->first = true;
                        state->pc = pc;
@@ -226,6 +226,8 @@ static bool next_frame(struct unwind_state *state)
 
        } while (!get_stack_info(state->sp, state->task, info));
 
+out:
+       state->error = true;
        return false;
 }
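
The unwinder hunks above (the csr_era checks mark these as the LoongArch unwinder) record a broken frame in state->error and make the stack walker stop as soon as the walk is either finished or has hit such a frame. A minimal user-space sketch of that walk-until-done-or-error pattern follows; the struct and function names are invented for illustration and are not kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    struct unwind_sketch {
            int depth;
            bool done;
            bool error;     /* set when a frame looks corrupt */
    };

    static void next_frame_sketch(struct unwind_sketch *s)
    {
            if (s->depth >= 8) {            /* pretend we ran out of frames */
                    s->done = true;
                    return;
            }
            if (s->depth == 5) {            /* pretend frame 5 is corrupt */
                    s->error = true;
                    return;
            }
            s->depth++;
    }

    int main(void)
    {
            struct unwind_sketch s = { 0 };

            /* The walk stops on completion *or* on the first bad frame. */
            for (; !s.done && !s.error; next_frame_sketch(&s))
                    printf("frame %d\n", s.depth);

            printf("%s after %d frames\n", s.error ? "error" : "done", s.depth);
            return 0;
    }
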
 
index e018aed..3b7d812 100644 (file)
@@ -41,7 +41,7 @@
  * don't have to care about aliases on other CPUs.
  */
 unsigned long empty_zero_page, zero_page_mask;
-EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
 void setup_zero_pages(void)
@@ -270,7 +270,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
-EXPORT_SYMBOL_GPL(invalid_pmd_table);
+EXPORT_SYMBOL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 EXPORT_SYMBOL(invalid_pte_table);
index 90da899..e2fc3b4 100644 (file)
@@ -80,6 +80,10 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
 
        JUMP_VIRT_ADDR  t0, t1
 
+       /* Enable PG */
+       li.w            t0, 0xb0                # PLV=0, IE=0, PG=1
+       csrwr           t0, LOONGARCH_CSR_CRMD
+
        la.pcrel        t0, acpi_saved_sp
        ld.d            sp, t0, 0
        SETUP_WAKEUP
index 52cbde6..9ff55cb 100644 (file)
@@ -15,6 +15,8 @@
 #define EMITS_PT_NOTE
 #endif
 
+#define RUNTIME_DISCARD_EXIT
+
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
index 07e2e26..f87c516 100644 (file)
                                         <&sysclk K210_CLK_APB0>;
                                clock-names = "ssi_clk", "pclk";
                                resets = <&sysrst K210_RST_SPI2>;
-                               spi-max-frequency = <25000000>;
                        };
 
                        i2s0: i2s@50250000 {
index d0846ba..6b1876e 100644 (file)
@@ -539,7 +539,7 @@ static void bpf_jit_plt(void *plt, void *ret, void *target)
 {
        memcpy(plt, bpf_plt, BPF_PLT_SIZE);
        *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-       *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target;
+       *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
 }
 
 /*
@@ -2010,7 +2010,9 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
        } __packed insn;
        char expected_plt[BPF_PLT_SIZE];
        char current_plt[BPF_PLT_SIZE];
+       char new_plt[BPF_PLT_SIZE];
        char *plt;
+       char *ret;
        int err;
 
        /* Verify the branch to be patched. */
@@ -2032,12 +2034,15 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
                if (err < 0)
                        return err;
-               bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr);
+               ret = (char *)ip + 6;
+               bpf_jit_plt(expected_plt, ret, old_addr);
                if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
                        return -EINVAL;
                /* Adjust the call address. */
+               bpf_jit_plt(new_plt, ret, new_addr);
                s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-                                 &new_addr, sizeof(void *));
+                                 new_plt + (bpf_plt_target - bpf_plt),
+                                 sizeof(void *));
        }
 
        /* Adjust the mask of the branch. */
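
The s390 BPF hunks above make bpf_jit_plt() fall back to the return address when no target is given (target ?: ret) and make bpf_arch_text_poke() build a complete new PLT image, then copy only the target slot out of it after verifying that the current bytes match the expected image. A rough user-space sketch of that verify-then-patch-one-slot pattern, with the layout and values invented for the example:

    #include <stdio.h>
    #include <string.h>

    #define SLOTS 4

    /* Tiny "PLT"-like template: slot 3 holds the branch target and falls
     * back to the return address when no target is supplied. */
    static void build_plt_sketch(void *plt[SLOTS], void *ret, void *target)
    {
            plt[0] = (void *)0x1;           /* stand-ins for fixed instructions */
            plt[1] = (void *)0x2;
            plt[2] = ret;
            plt[3] = target ? target : ret;
    }

    int main(void)
    {
            void *current[SLOTS], *expected[SLOTS], *next[SLOTS];
            void *ret = (void *)0xAA, *old = (void *)0xBB, *new = (void *)0xCC;

            build_plt_sketch(current, ret, old);    /* what is "in memory" now */
            build_plt_sketch(expected, ret, old);   /* what we expect to find */
            build_plt_sketch(next, ret, new);       /* what it should become */

            if (memcmp(current, expected, sizeof(current)))
                    return 1;                       /* refuse to patch the unexpected */

            /* Patch only the target slot, taken from the fully built image. */
            memcpy(&current[3], &next[3], sizeof(void *));
            printf("target is now %p\n", current[3]);
            return 0;
    }
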
index e2975a3..d7da28f 100644 (file)
@@ -8,7 +8,7 @@
 
 #define ALT_FLAGS_SHIFT                16
 
-#define ALT_FLAG_NOT           BIT(0)
+#define ALT_FLAG_NOT           (1 << 0)
 #define ALT_NOT(feature)       ((ALT_FLAG_NOT << ALT_FLAGS_SHIFT) | (feature))
 
 #ifndef __ASSEMBLY__
index 9137d16..04c55f1 100644 (file)
@@ -29,10 +29,11 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
        bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
        if (!bmd)
                return NULL;
-       memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
        bmd->iter = *data;
-       if (iter_is_iovec(data))
-               bmd->iter.iov = bmd->iov;
+       if (iter_is_iovec(data)) {
+               memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
+               bmd->iter.__iov = bmd->iov;
+       }
        return bmd;
 }
 
index f0ea9dc..2831f78 100644 (file)
@@ -2878,16 +2878,15 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 
        if (!plug)
                return NULL;
+       rq = rq_list_peek(&plug->cached_rq);
+       if (!rq || rq->q != q)
+               return NULL;
 
        if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
                *bio = NULL;
                return NULL;
        }
 
-       rq = rq_list_peek(&plug->cached_rq);
-       if (!rq || rq->q != q)
-               return NULL;
-
        type = blk_mq_get_hctx_type((*bio)->bi_opf);
        hctx_type = rq->mq_hctx->type;
        if (type != hctx_type &&
index 82d1728..df596d4 100644 (file)
@@ -142,9 +142,6 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
                        status =
                            acpi_write_bit_register(acpi_gbl_fixed_event_info
                                                    [i].enable_register_id,
-                                                   (i ==
-                                                    ACPI_EVENT_PCIE_WAKE) ?
-                                                   ACPI_ENABLE_EVENT :
                                                    ACPI_DISABLE_EVENT);
                        if (ACPI_FAILURE(status)) {
                                return (status);
@@ -188,11 +185,6 @@ u32 acpi_ev_fixed_event_detect(void)
                return (int_status);
        }
 
-       if (fixed_enable & ACPI_BITMASK_PCIEXP_WAKE_DISABLE)
-               fixed_enable &= ~ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
-       else
-               fixed_enable |= ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
-
        ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
                          "Fixed Event Block: Enable %08X Status %08X\n",
                          fixed_enable, fixed_status));
@@ -258,9 +250,6 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
        if (!acpi_gbl_fixed_event_handlers[event].handler) {
                (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
                                              enable_register_id,
-                                             (event ==
-                                              ACPI_EVENT_PCIE_WAKE) ?
-                                             ACPI_ENABLE_EVENT :
                                              ACPI_DISABLE_EVENT);
 
                ACPI_ERROR((AE_INFO,
index 37b3f64..bd93647 100644 (file)
@@ -311,20 +311,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
                                    [ACPI_EVENT_SLEEP_BUTTON].
                                    status_register_id, ACPI_CLEAR_STATUS);
 
-       /* Enable pcie wake event if support */
-       if ((acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE)) {
-               (void)
-                   acpi_write_bit_register(acpi_gbl_fixed_event_info
-                                           [ACPI_EVENT_PCIE_WAKE].
-                                           enable_register_id,
-                                           ACPI_DISABLE_EVENT);
-               (void)
-                   acpi_write_bit_register(acpi_gbl_fixed_event_info
-                                           [ACPI_EVENT_PCIE_WAKE].
-                                           status_register_id,
-                                           ACPI_CLEAR_STATUS);
-       }
-
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(status);
 }
index 53afa5e..cda6e16 100644 (file)
@@ -186,10 +186,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
                                        ACPI_BITREG_RT_CLOCK_ENABLE,
                                        ACPI_BITMASK_RT_CLOCK_STATUS,
                                        ACPI_BITMASK_RT_CLOCK_ENABLE},
-       /* ACPI_EVENT_PCIE_WAKE     */ {ACPI_BITREG_PCIEXP_WAKE_STATUS,
-                                       ACPI_BITREG_PCIEXP_WAKE_DISABLE,
-                                       ACPI_BITMASK_PCIEXP_WAKE_STATUS,
-                                       ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
 };
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
index 29619f4..d9629ff 100644 (file)
@@ -167,7 +167,8 @@ int psci_set_osi_mode(bool enable)
 
        err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
        if (err < 0)
-               pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
+               pr_info(FW_BUG "failed to set %s mode: %d\n",
+                               enable ? "OSI" : "PC", err);
        return psci_to_linux_errno(err);
 }
 
index 0914e73..1bc0437 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/module.h>
 #include <linux/stddef.h>
 #include <linux/errno.h>
-#include <linux/aer.h>
 
 #include "dfl.h"
 
@@ -376,10 +375,6 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
                return ret;
        }
 
-       ret = pci_enable_pcie_error_reporting(pcidev);
-       if (ret && ret != -EINVAL)
-               dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
-
        pci_set_master(pcidev);
 
        ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
@@ -387,24 +382,22 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
                ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(&pcidev->dev, "No suitable DMA support available.\n");
-               goto disable_error_report_exit;
+               return ret;
        }
 
        ret = cci_init_drvdata(pcidev);
        if (ret) {
                dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
-               goto disable_error_report_exit;
+               return ret;
        }
 
        ret = cci_enumerate_feature_devs(pcidev);
-       if (!ret)
+       if (ret) {
+               dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
                return ret;
+       }
 
-       dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
-
-disable_error_report_exit:
-       pci_disable_pcie_error_reporting(pcidev);
-       return ret;
+       return 0;
 }
 
 static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
@@ -448,7 +441,6 @@ static void cci_pci_remove(struct pci_dev *pcidev)
                cci_pci_sriov_configure(pcidev, 0);
 
        cci_remove_feature_devs(pcidev);
-       pci_disable_pcie_error_reporting(pcidev);
 }
 
 static struct pci_driver cci_pci_driver = {
index 5cd40ac..0953e6e 100644 (file)
@@ -363,7 +363,6 @@ fpga_bridge_register(struct device *parent, const char *name,
        bridge->dev.parent = parent;
        bridge->dev.of_node = parent->of_node;
        bridge->dev.id = id;
-       of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
 
        ret = dev_set_name(&bridge->dev, "br%d", id);
        if (ret)
@@ -375,6 +374,8 @@ fpga_bridge_register(struct device *parent, const char *name,
                return ERR_PTR(ret);
        }
 
+       of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
        return bridge;
 
 error_device:
index f0acedc..d7e2f9f 100644 (file)
@@ -474,7 +474,7 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
 
        ret = sec->ops->rsu_status(sec);
        if (ret < 0)
-               return ret;
+               return FW_UPLOAD_ERR_HW_ERROR;
        status = ret;
 
        if (!rsu_status_ok(status)) {
index 2d9c491..b76d854 100644 (file)
@@ -69,7 +69,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
        if (err)
                return err;
 
-       status = readl(priv->io_base);
+       status = xlnx_pr_decouple_read(priv, CTRL_OFFSET);
 
        clk_disable(priv->clk);
 
index a3846fa..11c4813 100644 (file)
@@ -86,6 +86,7 @@ static const struct regmap_config dio48e_regmap_config = {
        .volatile_table = &dio48e_volatile_table,
        .precious_table = &dio48e_precious_table,
        .cache_type = REGCACHE_FLAT,
+       .use_raw_spinlock = true,
 };
 
 /* only bit 3 on each respective Port C supports interrupts */
index ca2175b..ba73ee9 100644 (file)
@@ -81,6 +81,7 @@ static const struct regmap_config idi48_regmap_config = {
        .wr_table = &idi_48_wr_table,
        .rd_table = &idi_48_rd_table,
        .precious_table = &idi_48_precious_table,
+       .use_raw_spinlock = true,
 };
 
 #define IDI48_NGPIO 48
index d0a1cc8..fafebec 100644 (file)
@@ -596,6 +596,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;
 
+       if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
+               return -EINVAL;
+
        if (atomic_dec_and_test(&src->enabled_types[type]))
                return amdgpu_irq_update(adev, src, type);
 
index dc4f372..8af70fe 100644 (file)
@@ -169,10 +169,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
        if (rc)
                return rc;
 
-       irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+       if (amdgpu_in_reset(adev)) {
+               irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+               /* During gpu-reset we disable and then enable vblank irq, so
+                * don't use amdgpu_irq_get/put() to avoid refcount change.
+                */
+               if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+                       rc = -EBUSY;
+       } else {
+               rc = (enable)
+                       ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+                       : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+       }
 
-       if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
-               return -EBUSY;
+       if (rc)
+               return rc;
 
 skip:
        if (amdgpu_in_reset(adev))
index 54ed3de..9ffba4c 100644 (file)
@@ -1697,6 +1697,23 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
        *panel_config = panel_config_defaults;
 }
 
+static bool filter_modes_for_single_channel_workaround(struct dc *dc,
+               struct dc_state *context)
+{
+       // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR
+       if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+               int total_phy_pix_clk = 0;
+
+               for (int i = 0; i < context->stream_count; i++)
+                       if (context->res_ctx.pipe_ctx[i].stream)
+                               total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+
+               if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
+                       return true;
+       }
+       return false;
+}
+
 bool dcn314_validate_bandwidth(struct dc *dc,
                struct dc_state *context,
                bool fast_validate)
@@ -1712,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
 
        BW_VAL_TRACE_COUNT();
 
+       if (filter_modes_for_single_channel_workaround(dc, context))
+               goto validate_fail;
+
        DC_FP_START();
        // do not support self refresh only
        out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
index b37d143..5983657 100644 (file)
@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
        .maximum_dsc_bits_per_component = 10,
        .dsc422_native_support = false,
        .is_line_buffer_bpp_fixed = true,
-       .line_buffer_fixed_bpp = 49,
+       .line_buffer_fixed_bpp = 48,
        .line_buffer_size_bits = 789504,
        .max_line_buffer_lines = 12,
        .writeback_interface_buffer_size_kbytes = 90,
index e39b133..b56f07f 100644 (file)
@@ -934,6 +934,10 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
 
        pic_height = stream->timing.v_addressable +
                stream->timing.v_border_top + stream->timing.v_border_bottom;
+
+       if (stream->timing.dsc_cfg.num_slices_v == 0)
+               return false;
+
        slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
        config->dsc_slice_height = slice_height;
 
index 5a176bf..30c9881 100644 (file)
@@ -163,7 +163,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
              DP_AUX_CH_CTL_TIME_OUT_MAX |
              DP_AUX_CH_CTL_RECEIVE_ERROR |
              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-             DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+             DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
        if (intel_tc_port_in_tbt_alt_mode(dig_port))
index f77e449..ab9062e 100644 (file)
@@ -645,7 +645,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf_reloc *reloc,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-       long ret = 0;
+       int ret = 0;
        unsigned i;
 
        for (i = 0; i < req->nr_relocs; i++) {
@@ -653,6 +653,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;
+               long lret;
 
                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
@@ -703,13 +704,18 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                data |= r->vor;
                }
 
-               ret = dma_resv_wait_timeout(nvbo->bo.base.resv,
-                                           DMA_RESV_USAGE_BOOKKEEP,
-                                           false, 15 * HZ);
-               if (ret == 0)
+               lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
+                                            DMA_RESV_USAGE_BOOKKEEP,
+                                            false, 15 * HZ);
+               if (!lret)
                        ret = -EBUSY;
+               else if (lret > 0)
+                       ret = 0;
+               else
+                       ret = lret;
+
                if (ret) {
-                       NV_PRINTK(err, cli, "reloc wait_idle failed: %ld\n",
+                       NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
                                  ret);
                        break;
                }
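
The nouveau hunk above separates the long result of dma_resv_wait_timeout() (negative means error, zero means timeout, positive means time left) from the int error code the rest of the function reports. A small user-space sketch of that tri-state-to-errno conversion; the wait function and scenarios below are made up for the example:

    #include <errno.h>
    #include <stdio.h>

    /* Pretend wait primitive: <0 error, 0 timed out, >0 time remaining. */
    static long wait_timeout_sketch(int scenario)
    {
            switch (scenario) {
            case 0:  return 0;        /* timed out */
            case 1:  return 42;       /* finished with time to spare */
            default: return -EINTR;   /* interrupted */
            }
    }

    /* Collapse the tri-state long result into a plain int error code. */
    static int wait_and_convert(int scenario)
    {
            long lret = wait_timeout_sketch(scenario);

            if (!lret)
                    return -EBUSY;    /* a timeout becomes a real error */
            if (lret > 0)
                    return 0;         /* success */
            return (int)lret;         /* propagate the original error */
    }

    int main(void)
    {
            for (int s = 0; s < 3; s++)
                    printf("scenario %d -> %d\n", s, wait_and_convert(s));
            return 0;
    }
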
index ba3b817..293c228 100644 (file)
@@ -839,6 +839,8 @@ static void vop2_enable(struct vop2 *vop2)
                return;
        }
 
+       regcache_sync(vop2->map);
+
        if (vop2->data->soc_id == 3566)
                vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
 
@@ -867,6 +869,8 @@ static void vop2_disable(struct vop2 *vop2)
 
        pm_runtime_put_sync(vop2->dev);
 
+       regcache_mark_dirty(vop2->map);
+
        clk_disable_unprepare(vop2->aclk);
        clk_disable_unprepare(vop2->hclk);
 }
index 0e43784..1e08cc5 100644 (file)
@@ -308,7 +308,8 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
  */
 void drm_sched_fault(struct drm_gpu_scheduler *sched)
 {
-       mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
+       if (sched->ready)
+               mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
 }
 EXPORT_SYMBOL(drm_sched_fault);
 
index 50d02e5..7258912 100644 (file)
@@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
        trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
                                iio_device_id(indio), trigger_name);
        if (!trig)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        trig->dev.parent = indio->dev.parent;
        iio_trigger_set_drvdata(trig, indio);
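
Returning ERR_PTR(-ENOMEM) instead of NULL, as the at91_adc hunk above does, signals the failure to callers that check the result with IS_ERR() rather than against NULL. A simplified user-space take on the error-pointer idea; err_ptr/is_err/ptr_err below are stand-ins for the kernel helpers, not the real API:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline long  ptr_err(const void *p) { return (long)p; }
    static inline int   is_err(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *alloc_trigger_sketch(int fail)
    {
            void *t = fail ? NULL : malloc(32);

            /* Encode the errno in the pointer so a caller that only
             * checks is_err() still sees the failure. */
            if (!t)
                    return err_ptr(-ENOMEM);
            return t;
    }

    int main(void)
    {
            void *t = alloc_trigger_sketch(1);

            if (is_err(t))
                    printf("allocation failed: %ld\n", ptr_err(t));
            else
                    free(t);
            return 0;
    }
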
index beadfa9..404865e 100644 (file)
@@ -802,6 +802,7 @@ static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
        return pdata;
 
  error_out:
+       fwnode_handle_put(pp);
        devm_kfree(dev, pdata);
        return NULL;
 }
index ad50baa..e823c14 100644 (file)
@@ -601,6 +601,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
                        return -EINVAL;
                }
        }
+       chip->settings.prox_diode = prox_diode_mask;
 
        return 0;
 }
index b1d6ca7..f3d6ce4 100644 (file)
@@ -267,6 +267,8 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 
        if (!HFI1_CAP_IS_KSET(SDMA))
                return -EINVAL;
+       if (!from->user_backed)
+               return -EINVAL;
        idx = srcu_read_lock(&fd->pq_srcu);
        pq = srcu_dereference(fd->pq, &fd->pq_srcu);
        if (!cq || !pq) {
@@ -274,11 +276,6 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
                return -EIO;
        }
 
-       if (!iter_is_iovec(from) || !dim) {
-               srcu_read_unlock(&fd->pq_srcu, idx);
-               return -EINVAL;
-       }
-
        trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
 
        if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
@@ -287,11 +284,12 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
        }
 
        while (dim) {
+               const struct iovec *iov = iter_iov(from);
                int ret;
                unsigned long count = 0;
 
                ret = hfi1_user_sdma_process_request(
-                       fd, (struct iovec *)(from->iov + done),
+                       fd, (struct iovec *)(iov + done),
                        dim, &count);
                if (ret) {
                        reqs = ret;
index 80fe92a..815ea72 100644 (file)
@@ -2245,10 +2245,10 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
        struct qib_user_sdma_queue *pq = fp->pq;
 
-       if (!iter_is_iovec(from) || !from->nr_segs || !pq)
+       if (!from->user_backed || !from->nr_segs || !pq)
                return -EINVAL;
 
-       return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
+       return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
 }
 
 static struct class *qib_class;
index d836d3d..a68da29 100644 (file)
@@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf,
        pegasus->intf = intf;
 
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+       /* Sanity check that pipe's type matches endpoint's type */
+       if (usb_pipe_type_check(dev, pipe)) {
+               error = -EINVAL;
+               goto err_free_mem;
+       }
+
        pegasus->data_len = usb_maxpacket(dev, pipe);
 
        pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
index 16caffa..30102cb 100644 (file)
@@ -111,6 +111,7 @@ struct cyttsp5_sensing_conf_data_dev {
        __le16 max_z;
        u8 origin_x;
        u8 origin_y;
+       u8 panel_id;
        u8 btn;
        u8 scan_mode;
        u8 max_num_of_tch_per_refresh_cycle;
index bf76678..bbfaf65 100644 (file)
@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
        return card;
 err_out:
        host->card = old_card;
+       kfree_const(card->dev.kobj.name);
        kfree(card);
        return NULL;
 }
@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
                                put_device(&card->dev);
                                host->card = NULL;
                        }
-               } else
+               } else {
+                       kfree_const(card->dev.kobj.name);
                        kfree(card);
+               }
        }
 
 out_power_off:
index 8995309..672d37e 100644 (file)
@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
                 */
                case MMC_TIMING_SD_HS:
                case MMC_TIMING_MMC_HS:
-               case MMC_TIMING_UHS_SDR12:
-               case MMC_TIMING_UHS_SDR25:
                        val &= ~SDHCI_CTRL_HISPD;
                }
        }
index 8cc9a74..7a7d584 100644 (file)
@@ -1777,14 +1777,15 @@ void bond_lower_state_changed(struct slave *slave)
 
 /* The bonding driver uses ether_setup() to convert a master bond device
  * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
- * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
+ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
+ * if they were set
  */
 static void bond_ether_setup(struct net_device *bond_dev)
 {
-       unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
+       unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
 
        ether_setup(bond_dev);
-       bond_dev->flags |= IFF_MASTER | slave_flag;
+       bond_dev->flags |= IFF_MASTER | flags;
        bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 }
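
The bonding hunk above widens the set of flags preserved across ether_setup() from IFF_SLAVE alone to IFF_SLAVE and IFF_UP, since ether_setup() rewrites the device flags wholesale. A user-space sketch of the save-mask-restore pattern; the flag values mirror the uapi definitions, but the setup function is a stand-in:

    #include <stdio.h>

    #define IFF_UP      0x1
    #define IFF_MASTER  0x400
    #define IFF_SLAVE   0x800

    /* ether_setup() resets the flags wholesale; model that here. */
    static unsigned int ether_setup_sketch(unsigned int flags)
    {
            (void)flags;
            return 0;
    }

    int main(void)
    {
            unsigned int dev_flags = IFF_UP | IFF_SLAVE;

            /* Save only the bits that must survive, let setup reset the
             * rest, then restore them together with IFF_MASTER. */
            unsigned int keep = dev_flags & (IFF_SLAVE | IFF_UP);

            dev_flags = ether_setup_sketch(dev_flags);
            dev_flags |= IFF_MASTER | keep;

            printf("flags after setup: %#x\n", dev_flags);
            return 0;
    }
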
 
index 3fffd5d..ffcad05 100644 (file)
@@ -96,7 +96,7 @@ static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
 
        if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
                ctrl2 |= SW_LEGAL_PACKET_DISABLE;
-       else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+       if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
                ctrl1 |= SW_HUGE_PACKET;
 
        ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
index c23e3b3..651b79c 100644 (file)
@@ -2388,7 +2388,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
        case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
                switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
                case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
-                       if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+                       if (BNXT_PTP_USE_RTC(bp)) {
                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
                                u64 ns;
 
@@ -7627,7 +7627,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
        u8 flags;
        int rc;
 
-       if (bp->hwrm_spec_code < 0x10801) {
+       if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
                rc = -ENODEV;
                goto no_ptp;
        }
index e7b5e28..852eb44 100644 (file)
@@ -304,7 +304,7 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
        struct auxiliary_device *adev;
 
        /* Skip if no auxiliary device init was done. */
-       if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+       if (!bp->aux_priv)
                return;
 
        aux_priv = bp->aux_priv;
@@ -324,6 +324,7 @@ static void bnxt_aux_dev_release(struct device *dev)
        bp->edev = NULL;
        kfree(aux_priv->edev);
        kfree(aux_priv);
+       bp->aux_priv = NULL;
 }
 
 static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
@@ -359,19 +360,18 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
        if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
                return;
 
-       bp->aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
-       if (!bp->aux_priv)
+       aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+       if (!aux_priv)
                goto exit;
 
-       bp->aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
-       if (bp->aux_priv->id < 0) {
+       aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+       if (aux_priv->id < 0) {
                netdev_warn(bp->dev,
                            "ida alloc failed for ROCE auxiliary device\n");
-               kfree(bp->aux_priv);
+               kfree(aux_priv);
                goto exit;
        }
 
-       aux_priv = bp->aux_priv;
        aux_dev = &aux_priv->aux_dev;
        aux_dev->id = aux_priv->id;
        aux_dev->name = "rdma";
@@ -380,10 +380,11 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
 
        rc = auxiliary_device_init(aux_dev);
        if (rc) {
-               ida_free(&bnxt_aux_dev_ids, bp->aux_priv->id);
-               kfree(bp->aux_priv);
+               ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+               kfree(aux_priv);
                goto exit;
        }
+       bp->aux_priv = aux_priv;
 
        /* From this point, all cleanup will happen via the .release callback &
         * any error unwinding will need to include a call to
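
The bnxt hunk above builds the auxiliary device in a local aux_priv and assigns bp->aux_priv only once auxiliary_device_init() has succeeded, so the error paths never leave a stale pointer behind and the uninit path can simply test bp->aux_priv. A user-space sketch of that build-locally-publish-last pattern, with all names invented for the example:

    #include <stdio.h>
    #include <stdlib.h>

    struct aux_sketch { int id; };
    struct bp_sketch  { struct aux_sketch *aux_priv; };

    static int fallible_init(struct aux_sketch *a, int fail)
    {
            a->id = 1;
            return fail ? -1 : 0;
    }

    /* Fully construct the object in a local and publish it only after
     * every fallible step has succeeded. */
    static int aux_device_add_sketch(struct bp_sketch *bp, int fail)
    {
            struct aux_sketch *aux = calloc(1, sizeof(*aux));

            if (!aux)
                    return -1;
            if (fallible_init(aux, fail)) {
                    free(aux);
                    return -1;
            }
            bp->aux_priv = aux;     /* publish last */
            return 0;
    }

    int main(void)
    {
            struct bp_sketch bp = { 0 };

            printf("failing add -> %d, aux_priv=%p\n",
                   aux_device_add_sketch(&bp, 1), (void *)bp.aux_priv);
            printf("working add -> %d, aux_priv=%p\n",
                   aux_device_add_sketch(&bp, 0), (void *)bp.aux_priv);
            free(bp.aux_priv);
            return 0;
    }
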
index dd9be22..d354115 100644 (file)
@@ -1135,7 +1135,7 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
                return;
 
        if (adap->flower_stats_timer.function)
-               del_timer_sync(&adap->flower_stats_timer);
+               timer_shutdown_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
        adap->tc_flower_initialized = false;
index e1eb1de..e14d1e4 100644 (file)
@@ -5288,31 +5288,6 @@ static void e1000_watchdog_task(struct work_struct *work)
                                ew32(TARC(0), tarc0);
                        }
 
-                       /* disable TSO for pcie and 10/100 speeds, to avoid
-                        * some hardware issues
-                        */
-                       if (!(adapter->flags & FLAG_TSO_FORCE)) {
-                               switch (adapter->link_speed) {
-                               case SPEED_10:
-                               case SPEED_100:
-                                       e_info("10/100 speed: disabling TSO\n");
-                                       netdev->features &= ~NETIF_F_TSO;
-                                       netdev->features &= ~NETIF_F_TSO6;
-                                       break;
-                               case SPEED_1000:
-                                       netdev->features |= NETIF_F_TSO;
-                                       netdev->features |= NETIF_F_TSO6;
-                                       break;
-                               default:
-                                       /* oops */
-                                       break;
-                               }
-                               if (hw->mac.type == e1000_pch_spt) {
-                                       netdev->features &= ~NETIF_F_TSO;
-                                       netdev->features &= ~NETIF_F_TSO6;
-                               }
-                       }
-
                        /* enable transmits in the hardware, need to do this
                         * after setting TARC(0)
                         */
@@ -7526,6 +7501,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                            NETIF_F_RXCSUM |
                            NETIF_F_HW_CSUM);
 
+       /* disable TSO for pcie and 10/100 speeds to avoid
+        * some hardware issues and for i219 to fix transfer
+        * speed being capped at 60%
+        */
+       if (!(adapter->flags & FLAG_TSO_FORCE)) {
+               switch (adapter->link_speed) {
+               case SPEED_10:
+               case SPEED_100:
+                       e_info("10/100 speed: disabling TSO\n");
+                       netdev->features &= ~NETIF_F_TSO;
+                       netdev->features &= ~NETIF_F_TSO6;
+                       break;
+               case SPEED_1000:
+                       netdev->features |= NETIF_F_TSO;
+                       netdev->features |= NETIF_F_TSO6;
+                       break;
+               default:
+                       /* oops */
+                       break;
+               }
+               if (hw->mac.type == e1000_pch_spt) {
+                       netdev->features &= ~NETIF_F_TSO;
+                       netdev->features &= ~NETIF_F_TSO6;
+               }
+       }
+
        /* Set user-changeable features (subset of all device features) */
        netdev->hw_features = netdev->features;
        netdev->hw_features |= NETIF_F_RXFCS;
index 228cd50..7c30abd 100644 (file)
@@ -11059,8 +11059,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                                             pf->hw.aq.asq_last_status));
        }
        /* reinit the misc interrupt */
-       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                ret = i40e_setup_misc_vector(pf);
+               if (ret)
+                       goto end_unlock;
+       }
 
        /* Add a filter to drop all Flow control frames from any VSI from being
         * transmitted. By doing so we stop a malicious VF from sending out
@@ -14133,15 +14136,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                vsi->id = ctxt.vsi_number;
        }
 
-       vsi->active_filters = 0;
-       clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
        spin_lock_bh(&vsi->mac_filter_hash_lock);
+       vsi->active_filters = 0;
        /* If macvlan filters already exist, force them to get loaded */
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                f->state = I40E_FILTER_NEW;
                f_count++;
        }
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
+       clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
index 445fe30..2e78060 100644 (file)
@@ -59,9 +59,6 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;
 
-       if (mlx5_core_is_management_pf(dev))
-               return false;
-
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;
 
@@ -201,9 +198,6 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;
 
-       if (mlx5_core_is_management_pf(dev))
-               return false;
-
        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;
 
index 7c9c4e4..d000236 100644 (file)
@@ -75,10 +75,6 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_ecpf(dev))
                return 0;
 
-       /* Management PF don't have a peer PF */
-       if (mlx5_core_is_management_pf(dev))
-               return 0;
-
        return mlx5_host_pf_init(dev);
 }
 
@@ -89,10 +85,6 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_ecpf(dev))
                return;
 
-       /* Management PF don't have a peer PF */
-       if (mlx5_core_is_management_pf(dev))
-               return;
-
        mlx5_host_pf_cleanup(dev);
 
        err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
index 8bdf287..19fed51 100644 (file)
@@ -1488,7 +1488,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
        void *hca_caps;
        int err;
 
-       if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
+       if (!mlx5_core_is_ecpf(dev)) {
                *max_sfs = 0;
                return 0;
        }
index 017d68f..972c571 100644 (file)
@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
 
        if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
                multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+               if (!multi)
+                       return NULL;
                tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
        }
 
index 48dbfea..7cdf0ce 100644 (file)
@@ -26,7 +26,7 @@
 #define MLXSW_PCI_CIR_TIMEOUT_MSECS            1000
 
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS          200
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS          400
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC               0x5E
index 884d8d1..1eceffa 100644 (file)
@@ -541,7 +541,6 @@ int efx_net_open(struct net_device *net_dev)
        else
                efx->state = STATE_NET_UP;
 
-       efx_selftest_async_start(efx);
        return 0;
 }
 
index cc30524..361687d 100644 (file)
@@ -544,6 +544,8 @@ void efx_start_all(struct efx_nic *efx)
        /* Start the hardware monitor if there is one */
        efx_start_monitor(efx);
 
+       efx_selftest_async_start(efx);
+
        /* Link state detection is normally event-driven; we have
         * to poll now because we could have missed a change
         */
index a9c44f0..a94c7bd 100644 (file)
@@ -47,7 +47,7 @@ config BPQETHER
 
 config SCC
        tristate "Z8530 SCC driver"
-       depends on ISA && AX25 && ISA_DMA_API
+       depends on ISA && AX25
        help
          These cards are used to connect your Linux box to an amateur radio
          in order to communicate with other computers. If you want to use
index ad653b3..5df1eba 100644 (file)
@@ -1486,7 +1486,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
        skb->truesize += skb->data_len;
 
        for (i = 1; i < it->nr_segs; i++) {
-               size_t fragsz = it->iov[i].iov_len;
+               const struct iovec *iov = iter_iov(it);
+               size_t fragsz = iov->iov_len;
                struct page *page;
                void *frag;
 
index e1b38fb..4b3c664 100644 (file)
@@ -1262,11 +1262,12 @@ static void veth_set_xdp_features(struct net_device *dev)
 
        peer = rtnl_dereference(priv->peer);
        if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+               struct veth_priv *priv_peer = netdev_priv(peer);
                xdp_features_t val = NETDEV_XDP_ACT_BASIC |
                                     NETDEV_XDP_ACT_REDIRECT |
                                     NETDEV_XDP_ACT_RX_SG;
 
-               if (priv->_xdp_prog || veth_gro_requested(dev))
+               if (priv_peer->_xdp_prog || veth_gro_requested(peer))
                        val |= NETDEV_XDP_ACT_NDO_XMIT |
                               NETDEV_XDP_ACT_NDO_XMIT_SG;
                xdp_set_features_flag(dev, val);
@@ -1504,19 +1505,23 @@ static int veth_set_features(struct net_device *dev,
 {
        netdev_features_t changed = features ^ dev->features;
        struct veth_priv *priv = netdev_priv(dev);
+       struct net_device *peer;
        int err;
 
        if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
                return 0;
 
+       peer = rtnl_dereference(priv->peer);
        if (features & NETIF_F_GRO) {
                err = veth_napi_enable(dev);
                if (err)
                        return err;
 
-               xdp_features_set_redirect_target(dev, true);
+               if (peer)
+                       xdp_features_set_redirect_target(peer, true);
        } else {
-               xdp_features_clear_redirect_target(dev);
+               if (peer)
+                       xdp_features_clear_redirect_target(peer);
                veth_napi_del(dev);
        }
        return 0;
@@ -1598,13 +1603,13 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        peer->max_mtu = max_mtu;
                }
 
-               xdp_features_set_redirect_target(dev, true);
+               xdp_features_set_redirect_target(peer, true);
        }
 
        if (old_prog) {
                if (!prog) {
-                       if (!veth_gro_requested(dev))
-                               xdp_features_clear_redirect_target(dev);
+                       if (peer && !veth_gro_requested(dev))
+                               xdp_features_clear_redirect_target(peer);
 
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);
index 2396c28..ea1bd4b 100644 (file)
@@ -814,8 +814,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       int page_off,
                                       unsigned int *len)
 {
-       struct page *page = alloc_page(GFP_ATOMIC);
+       int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       struct page *page;
+
+       if (page_off + *len + tailroom > PAGE_SIZE)
+               return NULL;
 
+       page = alloc_page(GFP_ATOMIC);
        if (!page)
                return NULL;
 
@@ -823,7 +828,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
        page_off += *len;
 
        while (--*num_buf) {
-               int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                unsigned int buflen;
                void *buf;
                int off;
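
The xdp_linearize_page() hunk above hoists the tailroom calculation and rejects linearization up front when page_off + *len + tailroom would overflow the page, instead of allocating first and overrunning later. A user-space sketch of that check-before-copy bound; the page size and tailroom values are stand-ins, not the kernel constants:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE_SKETCH  4096
    #define TAILROOM_SKETCH   320   /* stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

    /* Copy a fragment into a fresh page-sized buffer, but only after
     * checking that offset + length + tailroom still fits. */
    static void *linearize_sketch(const void *frag, size_t page_off, size_t len)
    {
            char *page;

            if (page_off + len + TAILROOM_SKETCH > PAGE_SIZE_SKETCH)
                    return NULL;            /* would overflow, bail out early */

            page = malloc(PAGE_SIZE_SKETCH);
            if (!page)
                    return NULL;
            memcpy(page + page_off, frag, len);
            return page;
    }

    int main(void)
    {
            char frag[100] = "hello";
            void *ok  = linearize_sketch(frag, 256, sizeof(frag));
            void *bad = linearize_sketch(frag, 4000, sizeof(frag));

            printf("fits: %s, overflow rejected: %s\n",
                   ok ? "yes" : "no", bad ? "no" : "yes");
            free(ok);
            return 0;
    }
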
index da488cb..f2b76ee 100644 (file)
@@ -1504,7 +1504,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                goto rcd_done;
                        }
 
-                       if (rxDataRingUsed) {
+                       if (rxDataRingUsed && adapter->rxdataring_enabled) {
                                size_t sz;
 
                                BUG_ON(rcd->len > rq->data_ring.desc_size);
index 3363fc4..a084500 100644 (file)
@@ -646,9 +646,7 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
        struct ath9k_channel *chan = ah->curchan;
-       static const u32 channelmap[] = {
-               0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff
-       };
+       u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
        int i;
        s16 chan_start, chan_end;
        u16 wlan_chan;
index 1f71662..ef1d885 100644 (file)
@@ -750,8 +750,7 @@ out_disable:
        return ret;
 }
 
-static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries,
-                                     int nvec, int hwsize)
+static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 {
        bool nogap;
        int i, j;
@@ -762,10 +761,6 @@ static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *en
        nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY);
 
        for (i = 0; i < nvec; i++) {
-               /* Entry within hardware limit? */
-               if (entries[i].entry >= hwsize)
-                       return false;
-
                /* Check for duplicate entries */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
@@ -805,7 +800,7 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int
        if (hwsize < 0)
                return hwsize;
 
-       if (!pci_msix_validate_entries(dev, entries, nvec, hwsize))
+       if (!pci_msix_validate_entries(dev, entries, nvec))
                return -EINVAL;
 
        if (hwsize < nvec) {
index 196834e..4c2ef2e 100644 (file)
 #include "pci.h"
 
 #ifdef CONFIG_PCI
-void pci_set_of_node(struct pci_dev *dev)
+/**
+ * pci_set_of_node - Find and set device's DT device_node
+ * @dev: the PCI device structure to fill
+ *
+ * Returns 0 on success with of_node set or when no device is described in the
+ * DT. Returns -ENODEV if the device is present, but disabled in the DT.
+ */
+int pci_set_of_node(struct pci_dev *dev)
 {
+       struct device_node *node;
+
        if (!dev->bus->dev.of_node)
-               return;
-       dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
-                                                   dev->devfn);
-       if (dev->dev.of_node)
-               dev->dev.fwnode = &dev->dev.of_node->fwnode;
+               return 0;
+
+       node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
+       if (!node)
+               return 0;
+
+       if (!of_device_is_available(node)) {
+               of_node_put(node);
+               return -ENODEV;
+       }
+
+       dev->dev.of_node = node;
+       dev->dev.fwnode = &node->fwnode;
+       return 0;
 }
 
 void pci_release_of_node(struct pci_dev *dev)
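
The PCI hunk above makes pci_set_of_node() distinguish three cases: no DT node (fine), an available node (attach it and keep the reference), and a node that exists but is marked disabled (drop the reference and return -ENODEV so the device is skipped). A loose user-space sketch of that decision, with the node type and helpers invented for illustration:

    #include <errno.h>
    #include <stdio.h>

    struct node_sketch {
            int refcount;
            int available;          /* status "okay" vs "disabled" */
    };

    static void node_put(struct node_sketch *n) { n->refcount--; }

    /* No node described is fine; a disabled node means the device must
     * not be probed at all. */
    static int set_of_node_sketch(struct node_sketch **dev_node, struct node_sketch *n)
    {
            if (!n)
                    return 0;

            if (!n->available) {
                    node_put(n);        /* drop the lookup reference */
                    return -ENODEV;     /* tell the caller to skip the device */
            }

            *dev_node = n;              /* keep the reference with the device */
            return 0;
    }

    int main(void)
    {
            struct node_sketch disabled = { .refcount = 1, .available = 0 };
            struct node_sketch *attached = NULL;

            printf("disabled node -> %d (refcount %d)\n",
                   set_of_node_sketch(&attached, &disabled), disabled.refcount);
            return 0;
    }
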
index d2c0867..2b48a0a 100644 (file)
@@ -624,7 +624,7 @@ int of_pci_get_max_link_speed(struct device_node *node);
 u32 of_pci_get_slot_power_limit(struct device_node *node,
                                u8 *slot_power_limit_value,
                                u8 *slot_power_limit_scale);
-void pci_set_of_node(struct pci_dev *dev);
+int pci_set_of_node(struct pci_dev *dev);
 void pci_release_of_node(struct pci_dev *dev);
 void pci_set_bus_of_node(struct pci_bus *bus);
 void pci_release_bus_of_node(struct pci_bus *bus);
@@ -662,7 +662,7 @@ of_pci_get_slot_power_limit(struct device_node *node,
        return 0;
 }
 
-static inline void pci_set_of_node(struct pci_dev *dev) { }
+static inline int pci_set_of_node(struct pci_dev *dev) { return 0; }
 static inline void pci_release_of_node(struct pci_dev *dev) { }
 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
index a3f68b6..f96fa83 100644 (file)
@@ -1826,7 +1826,7 @@ int pci_setup_device(struct pci_dev *dev)
        u32 class;
        u16 cmd;
        u8 hdr_type;
-       int pos = 0;
+       int err, pos = 0;
        struct pci_bus_region region;
        struct resource *res;
 
@@ -1840,10 +1840,10 @@ int pci_setup_device(struct pci_dev *dev)
        dev->error_state = pci_channel_io_normal;
        set_pcie_port_type(dev);
 
-       pci_set_of_node(dev);
+       err = pci_set_of_node(dev);
+       if (err)
+               return err;
        pci_set_acpi_fwnode(dev);
-       if (dev->dev.fwnode && !fwnode_device_is_available(dev->dev.fwnode))
-               return -ENODEV;
 
        pci_dev_assign_slot(dev);
 
index a78fdb1..8b64388 100644 (file)
 #define DMC_QOS_IRQ            BIT(30)
 
 /* DMC bandwidth monitor register address offset */
-#define DMC_MON_G12_CTRL0              (0x20  << 2)
-#define DMC_MON_G12_CTRL1              (0x21  << 2)
-#define DMC_MON_G12_CTRL2              (0x22  << 2)
-#define DMC_MON_G12_CTRL3              (0x23  << 2)
-#define DMC_MON_G12_CTRL4              (0x24  << 2)
-#define DMC_MON_G12_CTRL5              (0x25  << 2)
-#define DMC_MON_G12_CTRL6              (0x26  << 2)
-#define DMC_MON_G12_CTRL7              (0x27  << 2)
-#define DMC_MON_G12_CTRL8              (0x28  << 2)
-
-#define DMC_MON_G12_ALL_REQ_CNT                (0x29  << 2)
-#define DMC_MON_G12_ALL_GRANT_CNT      (0x2a  << 2)
-#define DMC_MON_G12_ONE_GRANT_CNT      (0x2b  << 2)
-#define DMC_MON_G12_SEC_GRANT_CNT      (0x2c  << 2)
-#define DMC_MON_G12_THD_GRANT_CNT      (0x2d  << 2)
-#define DMC_MON_G12_FOR_GRANT_CNT      (0x2e  << 2)
-#define DMC_MON_G12_TIMER              (0x2f  << 2)
+#define DMC_MON_G12_CTRL0              (0x0  << 2)
+#define DMC_MON_G12_CTRL1              (0x1  << 2)
+#define DMC_MON_G12_CTRL2              (0x2  << 2)
+#define DMC_MON_G12_CTRL3              (0x3  << 2)
+#define DMC_MON_G12_CTRL4              (0x4  << 2)
+#define DMC_MON_G12_CTRL5              (0x5  << 2)
+#define DMC_MON_G12_CTRL6              (0x6  << 2)
+#define DMC_MON_G12_CTRL7              (0x7  << 2)
+#define DMC_MON_G12_CTRL8              (0x8  << 2)
+
+#define DMC_MON_G12_ALL_REQ_CNT                (0x9  << 2)
+#define DMC_MON_G12_ALL_GRANT_CNT      (0xa  << 2)
+#define DMC_MON_G12_ONE_GRANT_CNT      (0xb  << 2)
+#define DMC_MON_G12_SEC_GRANT_CNT      (0xc  << 2)
+#define DMC_MON_G12_THD_GRANT_CNT      (0xd  << 2)
+#define DMC_MON_G12_FOR_GRANT_CNT      (0xe  << 2)
+#define DMC_MON_G12_TIMER              (0xf  << 2)
 
 /* Each bit represent a axi line */
 PMU_FORMAT_ATTR(event, "config:0-7");
index 529963a..41537c4 100644 (file)
@@ -8,18 +8,19 @@
 // Copyright (c) 2012 Marvell Technology Ltd.
 // Yunfan Zhang <yfzhang@marvell.com>
 
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/param.h>
-#include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/regulator/driver.h>
+#include <linux/regulator/fan53555.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/regulator/fan53555.h>
 
 /* Voltage setting */
 #define FAN53555_VSEL0         0x00
@@ -60,7 +61,7 @@
 #define TCS_VSEL1_MODE         (1 << 6)
 
 #define TCS_SLEW_SHIFT         3
-#define TCS_SLEW_MASK          (0x3 < 3)
+#define TCS_SLEW_MASK          GENMASK(4, 3)
 
 enum fan53555_vendor {
        FAN53526_VENDOR_FAIRCHILD = 0,
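
The second fan53555 hunk above fixes a classic mask typo: TCS_SLEW_MASK was defined as (0x3 < 3), a less-than comparison that evaluates to 0, where a shift (0x3 << 3) covering bits 4..3 was intended; GENMASK(4, 3) expresses the same mask with no room for that slip. A tiny standalone demonstration (GENMASK_SKETCH is a user-space stand-in for the kernel macro):

    #include <stdio.h>

    #define GENMASK_SKETCH(h, l) (((1UL << ((h) - (l) + 1)) - 1) << (l))

    int main(void)
    {
            unsigned long broken  = (0x3 < 3);          /* comparison: 0    */
            unsigned long shifted = (0x3 << 3);         /* intended:   0x18 */
            unsigned long genmask = GENMASK_SKETCH(4, 3);

            printf("0x3 < 3  = %#lx (empty mask)\n", broken);
            printf("0x3 << 3 = %#lx (bits 4..3)\n", shifted);
            printf("GENMASK  = %#lx (same, but self-describing)\n", genmask);
            return 0;
    }
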
index 05ad28f..229df71 100644 (file)
@@ -42,6 +42,7 @@ static const int sm5703_buck_voltagemap[] = {
                .type = REGULATOR_VOLTAGE,                              \
                .id = SM5703_USBLDO ## _id,                             \
                .ops = &sm5703_regulator_ops_fixed,                     \
+               .n_voltages = 1,                                        \
                .fixed_uV = SM5703_USBLDO_MICROVOLT,                    \
                .enable_reg = SM5703_REG_USBLDO12,                      \
                .enable_mask = SM5703_REG_EN_USBLDO ##_id,              \
@@ -56,6 +57,7 @@ static const int sm5703_buck_voltagemap[] = {
                .type = REGULATOR_VOLTAGE,                              \
                .id = SM5703_VBUS,                                      \
                .ops = &sm5703_regulator_ops_fixed,                     \
+               .n_voltages = 1,                                        \
                .fixed_uV = SM5703_VBUS_MICROVOLT,                      \
                .enable_reg = SM5703_REG_CNTL,                          \
                .enable_mask = SM5703_OPERATION_MODE_MASK,              \
index bd87d3c..69347b6 100644 (file)
@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "Failed to request irq\n");
 
-               return ret;
+               goto err_irq;
        }
 
        ret = rockchip_sfc_init(sfc);
index 290b1bb..df5fb54 100644 (file)
@@ -488,7 +488,7 @@ static bool is_normal_memory(pgprot_t p)
 #elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
 #else
-#error "Unuspported architecture"
+#error "Unsupported architecture"
 #endif
 }
 
index b1c6231..673cf03 100644 (file)
@@ -32,7 +32,7 @@ static int shm_get_kernel_pages(unsigned long start, size_t page_count,
                         is_kmap_addr((void *)start)))
                return -EINVAL;
 
-       page = virt_to_page(start);
+       page = virt_to_page((void *)start);
        for (n = 0; n < page_count; n++) {
                pages[n] = page + n;
                get_page(pages[n]);
index 32d0be9..e68f7d2 100644 (file)
@@ -665,7 +665,7 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
        int sgl_count = 0;
 
-       if (!iter || !iter->iov) {
+       if (!iter || !iter_iov(iter)) {
                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
                       " present\n", __func__, bytes);
                return -EINVAL;
index 317aeff..a6d77fe 100644 (file)
 #define BTRFS_DISCARD_DELAY            (120ULL * NSEC_PER_SEC)
 #define BTRFS_DISCARD_UNUSED_DELAY     (10ULL * NSEC_PER_SEC)
 
-/* Target completion latency of discarding all discardable extents */
-#define BTRFS_DISCARD_TARGET_MSEC      (6 * 60 * 60UL * MSEC_PER_SEC)
 #define BTRFS_DISCARD_MIN_DELAY_MSEC   (1UL)
 #define BTRFS_DISCARD_MAX_DELAY_MSEC   (1000UL)
-#define BTRFS_DISCARD_MAX_IOPS         (10U)
+#define BTRFS_DISCARD_MAX_IOPS         (1000U)
 
 /* Monotonically decreasing minimum length filters after index 0 */
 static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
@@ -577,6 +575,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
        s32 discardable_extents;
        s64 discardable_bytes;
        u32 iops_limit;
+       unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
        unsigned long delay;
 
        discardable_extents = atomic_read(&discard_ctl->discardable_extents);
@@ -607,13 +606,19 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
        }
 
        iops_limit = READ_ONCE(discard_ctl->iops_limit);
-       if (iops_limit)
+
+       if (iops_limit) {
                delay = MSEC_PER_SEC / iops_limit;
-       else
-               delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
+       } else {
+               /*
+                * Unset iops_limit means go as fast as possible, so allow a
+                * delay of 0.
+                */
+               delay = 0;
+               min_delay = 0;
+       }
 
-       delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
-                     BTRFS_DISCARD_MAX_DELAY_MSEC);
+       delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
        discard_ctl->delay_ms = delay;
 
        spin_unlock(&discard_ctl->lock);
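
The btrfs hunks above raise BTRFS_DISCARD_MAX_IOPS from 10 to 1000 and drop the old target-latency fallback: the per-request delay is simply MSEC_PER_SEC / iops_limit, and an unset limit now means a delay of 0 (the minimum clamp is relaxed to 0 as well). A small user-space sketch of the resulting arithmetic, with hypothetical helper names:

    #include <stdio.h>

    #define MSEC_PER_SEC            1000UL
    #define DISCARD_MIN_DELAY_MSEC  1UL
    #define DISCARD_MAX_DELAY_MSEC  1000UL

    static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Delay between discard requests for a given IOPS limit; a limit of
     * 0 means "unlimited, go as fast as possible". */
    static unsigned long discard_delay_ms(unsigned int iops_limit)
    {
            unsigned long min_delay = DISCARD_MIN_DELAY_MSEC;
            unsigned long delay;

            if (iops_limit) {
                    delay = MSEC_PER_SEC / iops_limit;
            } else {
                    delay = 0;
                    min_delay = 0;
            }
            return clamp_ul(delay, min_delay, DISCARD_MAX_DELAY_MSEC);
    }

    int main(void)
    {
            printf("iops=1000 -> %lu ms\n", discard_delay_ms(1000)); /* 1 ms   */
            printf("iops=10   -> %lu ms\n", discard_delay_ms(10));   /* 100 ms */
            printf("iops=0    -> %lu ms\n", discard_delay_ms(0));    /* 0 ms   */
            return 0;
    }
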
index 5cc5a1f..f649647 100644 (file)
@@ -3730,10 +3730,15 @@ static int check_direct_read(struct btrfs_fs_info *fs_info,
        if (!iter_is_iovec(iter))
                return 0;
 
-       for (seg = 0; seg < iter->nr_segs; seg++)
-               for (i = seg + 1; i < iter->nr_segs; i++)
-                       if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
+       for (seg = 0; seg < iter->nr_segs; seg++) {
+               for (i = seg + 1; i < iter->nr_segs; i++) {
+                       const struct iovec *iov1 = iter_iov(iter) + seg;
+                       const struct iovec *iov2 = iter_iov(iter) + i;
+
+                       if (iov1->iov_base == iov2->iov_base)
                                return -EINVAL;
+               }
+       }
        return 0;
 }
 
index cb40074..0329a90 100644 (file)
@@ -171,8 +171,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
                mnt = ERR_CAST(full_path);
                goto out;
        }
-
-       convert_delimiter(full_path, '/');
        cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
 
        tmp = *cur_ctx;
index 13f26e0..0b8cbf7 100644 (file)
@@ -34,19 +34,33 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
                              cifs_remap(cifs_sb), path, ref, tl);
 }
 
+/* Return DFS full path out of a dentry set for automount */
 static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
        struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
        struct TCP_Server_Info *server = tcon->ses->server;
+       size_t len;
+       char *s;
 
        if (unlikely(!server->origin_fullpath))
                return ERR_PTR(-EREMOTE);
 
-       return __build_path_from_dentry_optional_prefix(dentry, page,
-                                                       server->origin_fullpath,
-                                                       strlen(server->origin_fullpath),
-                                                       true);
+       s = dentry_path_raw(dentry, page, PATH_MAX);
+       if (IS_ERR(s))
+               return s;
+       /* for root, we want "" */
+       if (!s[1])
+               s++;
+
+       len = strlen(server->origin_fullpath);
+       if (s < (char *)page + len)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       s -= len;
+       memcpy(s, server->origin_fullpath, len);
+       convert_delimiter(s, '/');
+       return s;
 }
 
 static inline void dfs_put_root_smb_sessions(struct list_head *head)
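
The cifs hunk above builds the automount device name by letting dentry_path_raw() place the dentry path at the tail of the page buffer and then sliding the server's origin_fullpath in front of it, after checking that the prefix still fits; the combined string is then delimiter-converted in place. A user-space sketch of that prepend-into-the-same-buffer trick, with the helper names and paths made up for the example:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SKETCH 4096

    /* dentry_path_raw()-style helper: write the path at the *end* of the
     * buffer and return a pointer to its first character. */
    static char *path_at_end(char *page, const char *path)
    {
            size_t len = strlen(path) + 1;
            char *s = page + PAGE_SKETCH - len;

            memcpy(s, path, len);
            return s;
    }

    /* Prepend a prefix in front of a suffix already sitting at the tail
     * of the buffer, refusing when it would run past the start. */
    static char *prepend_prefix(char *page, char *s, const char *prefix)
    {
            size_t len = strlen(prefix);

            if (s < page + len)
                    return NULL;    /* -ENAMETOOLONG in the real code */
            s -= len;
            memcpy(s, prefix, len);
            return s;
    }

    int main(void)
    {
            char page[PAGE_SKETCH];
            char *s = path_at_end(page, "/share/dir/file.txt");

            s = prepend_prefix(page, s, "\\\\server\\dfsroot");
            if (s)
                    printf("full path: %s\n", s);
            return 0;
    }
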
index 6831a99..b33d2e7 100644 (file)
@@ -4010,7 +4010,6 @@ static void
 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
 {
        struct cifs_readdata *rdata, *tmp;
-       struct iov_iter *to = &ctx->iter;
        struct cifs_sb_info *cifs_sb;
        int rc;
 
@@ -4076,9 +4075,6 @@ again:
                kref_put(&rdata->refcount, cifs_readdata_release);
        }
 
-       if (!ctx->direct_io)
-               ctx->total_len = ctx->len - iov_iter_count(to);
-
        /* mask nodata case */
        if (rc == -ENODATA)
                rc = 0;
index 4245249..366f0c3 100644 (file)
@@ -4180,10 +4180,12 @@ smb2_readv_callback(struct mid_q_entry *mid)
        struct smb2_hdr *shdr =
                                (struct smb2_hdr *)rdata->iov[0].iov_base;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
-       struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
-                                .rq_nvec = 1,
-                                .rq_iter = rdata->iter,
-                                .rq_iter_size = iov_iter_count(&rdata->iter), };
+       struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 };
+
+       if (rdata->got_bytes) {
+               rqst.rq_iter      = rdata->iter;
+               rqst.rq_iter_size = iov_iter_count(&rdata->iter);
+       };
 
        WARN_ONCE(rdata->server != mid->server,
                  "rdata server %p != mid server %p",
index 195dc23..1db3e3c 100644 (file)
@@ -978,6 +978,16 @@ restart:
                        continue;
                }
 
+               /*
+                * If wb_tryget fails, the wb has been shut down; skip it.
+                *
+                * Pin @wb so that it stays on @bdi->wb_list.  This allows
+                * continuing iteration from @wb after dropping and
+                * regrabbing rcu read lock.
+                */
+               if (!wb_tryget(wb))
+                       continue;
+
                /* alloc failed, execute synchronously using on-stack fallback */
                work = &fallback_work;
                *work = *base_work;
@@ -986,13 +996,6 @@ restart:
                work->done = &fallback_work_done;
 
                wb_queue_work(wb, work);
-
-               /*
-                * Pin @wb so that it stays on @bdi->wb_list.  This allows
-                * continuing iteration from @wb after dropping and
-                * regrabbing rcu read lock.
-                */
-               wb_get(wb);
                last_wb = wb;
 
                rcu_read_unlock();
index de37a3a..89d97f6 100644 (file)
@@ -1419,7 +1419,7 @@ out:
 
 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
 {
-       return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+       return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
 }
 
 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
index 6ad4139..2286596 100644 (file)
@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
        return 0;
 }
 
+/**
+ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
+ * @sci: segment constructor object
+ *
+ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
+ * the current segment summary block.
+ */
+static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
+{
+       struct nilfs_segsum_pointer *ssp;
+
+       ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
+       if (ssp->offset < ssp->bh->b_size)
+               memset(ssp->bh->b_data + ssp->offset, 0,
+                      ssp->bh->b_size - ssp->offset);
+}
+
 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 {
        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
                                * The current segment is filled up
                                * (internal code)
                                */
+       nilfs_segctor_zeropad_segsum(sci);
        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
        return nilfs_segctor_reset_segment_buffer(sci);
 }
@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                goto retry;
        }
        if (unlikely(required)) {
+               nilfs_segctor_zeropad_segsum(sci);
                err = nilfs_segbuf_extend_segsum(segbuf);
                if (unlikely(err))
                        goto failed;
@@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
                sci->sc_stage = prev_stage;
        }
+       nilfs_segctor_zeropad_segsum(sci);
        nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
        return 0;
 
index 7a2ff61..a21ba3b 100644 (file)
@@ -749,15 +749,14 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
                return -EOPNOTSUPP;
 
        while (iov_iter_count(iter)) {
-               struct iovec iovec = iov_iter_iovec(iter);
                ssize_t nr;
 
                if (type == READ) {
-                       nr = filp->f_op->read(filp, iovec.iov_base,
-                                             iovec.iov_len, ppos);
+                       nr = filp->f_op->read(filp, iter_iov_addr(iter),
+                                               iter_iov_len(iter), ppos);
                } else {
-                       nr = filp->f_op->write(filp, iovec.iov_base,
-                                              iovec.iov_len, ppos);
+                       nr = filp->f_op->write(filp, iter_iov_addr(iter),
+                                               iter_iov_len(iter), ppos);
                }
 
                if (nr < 0) {
@@ -766,7 +765,7 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
                        break;
                }
                ret += nr;
-               if (nr != iovec.iov_len)
+               if (nr != iter_iov_len(iter))
                        break;
                iov_iter_advance(iter, nr);
        }
index 44d1ee4..40f9e1a 100644 (file)
@@ -1955,8 +1955,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
        ret = -EFAULT;
        if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
                goto out;
-       /* Ignore unsupported features (userspace built against newer kernel) */
-       features = uffdio_api.features & UFFD_API_FEATURES;
+       features = uffdio_api.features;
+       ret = -EINVAL;
+       if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+               goto err_out;
        ret = -EPERM;
        if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
                goto err_out;
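With this change the UFFDIO_API handshake is strict: a stale uffdio_api.api value or any feature bit the kernel does not recognize is rejected with -EINVAL rather than silently masked. A hedged user-space sketch of the handshake a client is now expected to perform:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uffd_open(void)
{
        struct uffdio_api api = {
                .api = UFFD_API,
                /* Request only feature bits this program understands; an
                 * unknown bit now fails with EINVAL instead of being
                 * ignored by the kernel. */
                .features = 0,
        };
        int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (fd < 0)
                return -1;
        if (ioctl(fd, UFFDIO_API, &api) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}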
index 95e4f56..1b4f81f 100644 (file)
@@ -723,8 +723,7 @@ typedef u32 acpi_event_type;
 #define ACPI_EVENT_POWER_BUTTON         2
 #define ACPI_EVENT_SLEEP_BUTTON         3
 #define ACPI_EVENT_RTC                  4
-#define ACPI_EVENT_PCIE_WAKE            5
-#define ACPI_EVENT_MAX                  5
+#define ACPI_EVENT_MAX                  4
 #define ACPI_NUM_FIXED_EVENTS           ACPI_EVENT_MAX + 1
 
 /*
index e38ae3c..30b1764 100644 (file)
@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift:        page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                   pgprot_t prot, struct page **pages,
-                                   unsigned int page_shift);
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                  pgprot_t prot, struct page **pages,
+                                  unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * @page_shift:        page_shift argument passed to vmap_range_noflush().
  *
  * KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success, callers must check for non-zero return
+ * value.
  */
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
-                             phys_addr_t phys_addr, pgprot_t prot,
-                             unsigned int page_shift);
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+                            phys_addr_t phys_addr, pgprot_t prot,
+                            unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
-                                                 unsigned long end,
-                                                 pgprot_t prot,
-                                                 struct page **pages,
-                                                 unsigned int page_shift)
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
+                                                unsigned long end,
+                                                pgprot_t prot,
+                                                struct page **pages,
+                                                unsigned int page_shift)
 {
+       return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline void kmsan_ioremap_page_range(unsigned long start,
-                                           unsigned long end,
-                                           phys_addr_t phys_addr,
-                                           pgprot_t prot,
-                                           unsigned int page_shift)
+static inline int kmsan_ioremap_page_range(unsigned long start,
+                                          unsigned long end,
+                                          phys_addr_t phys_addr, pgprot_t prot,
+                                          unsigned int page_shift)
 {
+       return 0;
 }
 
 static inline void kmsan_iounmap_page_range(unsigned long start,
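Since both hooks now report failure instead of silently continuing, the mapping paths that call them are expected to check the result and unwind. A hedged sketch of the calling pattern, with __do_map_pages() standing in for the real low-level mapper (hypothetical name, not a kernel function):

/* Illustrative caller only; __do_map_pages() is a placeholder. The point
 * is that the KMSAN hook's return value must now be propagated instead
 * of ignored. */
static int map_with_kmsan(unsigned long addr, unsigned long end,
                          pgprot_t prot, struct page **pages,
                          unsigned int page_shift)
{
        int err = __do_map_pages(addr, end, prot, pages, page_shift);

        if (err)
                return err;

        err = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
        if (err)
                return err;     /* caller unwinds the real mapping on failure */

        return 0;
}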
index f33389b..7e225e4 100644 (file)
@@ -1211,11 +1211,6 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
        return dev->coredev_type == MLX5_COREDEV_VF;
 }
 
-static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev)
-{
-       return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);
-}
-
 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
 {
        return dev->caps.embedded_cpu;
index ff7ad33..dbcaac8 100644 (file)
@@ -294,6 +294,7 @@ struct nf_bridge_info {
        u8                      pkt_otherhost:1;
        u8                      in_prerouting:1;
        u8                      bridged_dnat:1;
+       u8                      sabotage_in_done:1;
        __u16                   frag_max_size;
        struct net_device       *physindev;
 
@@ -4712,7 +4713,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
 
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
        skb->nf_trace = 0;
 #endif
 }
@@ -4732,7 +4733,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
        dst->_nfct = src->_nfct;
        nf_conntrack_get(skb_nfct(src));
 #endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
        if (copy)
                dst->nf_trace = src->nf_trace;
 #endif
index 27e3fd9..ed35f44 100644 (file)
@@ -49,14 +49,35 @@ struct iov_iter {
                size_t iov_offset;
                int last_offset;
        };
-       size_t count;
+       /*
+        * Hack alert: overlay ubuf_iovec with iovec + count, so
+        * that the members resolve correctly regardless of the type
+        * of iterator used. This means that you can use:
+        *
+        * &iter->__ubuf_iovec or iter->__iov
+        *
+        * interchangeably for the user_backed cases, hence simplifying
+        * some of the cases that need to deal with both.
+        */
        union {
-               const struct iovec *iov;
-               const struct kvec *kvec;
-               const struct bio_vec *bvec;
-               struct xarray *xarray;
-               struct pipe_inode_info *pipe;
-               void __user *ubuf;
+               /*
+                * This really should be a const, but we cannot do that without
+                * also modifying any of the zero-filling iter init functions.
+                * Leave it non-const for now, but it should be treated as such.
+                */
+               struct iovec __ubuf_iovec;
+               struct {
+                       union {
+                               /* use iter_iov() to get the current vec */
+                               const struct iovec *__iov;
+                               const struct kvec *kvec;
+                               const struct bio_vec *bvec;
+                               struct xarray *xarray;
+                               struct pipe_inode_info *pipe;
+                               void __user *ubuf;
+                       };
+                       size_t count;
+               };
        };
        union {
                unsigned long nr_segs;
@@ -68,6 +89,16 @@ struct iov_iter {
        };
 };
 
+static inline const struct iovec *iter_iov(const struct iov_iter *iter)
+{
+       if (iter->iter_type == ITER_UBUF)
+               return (const struct iovec *) &iter->__ubuf_iovec;
+       return iter->__iov;
+}
+
+#define iter_iov_addr(iter)    (iter_iov(iter)->iov_base + (iter)->iov_offset)
+#define iter_iov_len(iter)     (iter_iov(iter)->iov_len - (iter)->iov_offset)
+
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
 {
        return i->iter_type;
@@ -143,15 +174,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
        return ret;
 }
 
-static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
-{
-       return (struct iovec) {
-               .iov_base = iter->iov->iov_base + iter->iov_offset,
-               .iov_len = min(iter->count,
-                              iter->iov->iov_len - iter->iov_offset),
-       };
-}
-
 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
                                  size_t bytes, struct iov_iter *i);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
@@ -359,7 +381,8 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
                .user_backed = true,
                .data_source = direction,
                .ubuf = buf,
-               .count = count
+               .count = count,
+               .nr_segs = 1
        };
 }
 /* Flags for iov_iter_get/extract_pages*() */
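For users of the API the practical rule is: never dereference the iovec pointer directly any more; iter_iov() resolves the current segment for both ITER_IOVEC and ITER_UBUF, and iter_iov_addr()/iter_iov_len() already account for iov_offset. A hedged sketch of a segment-at-a-time loop written against the new accessors (process_chunk() is a stand-in, not a real function):

/* Illustrative only: walk a user-backed iterator one segment at a time.
 * process_chunk() is hypothetical and simply consumes (addr, len). */
static ssize_t walk_user_iter(struct iov_iter *iter)
{
        ssize_t done = 0;

        while (iov_iter_count(iter)) {
                void __user *addr = iter_iov_addr(iter);
                size_t len = iter_iov_len(iter);
                ssize_t nr = process_chunk(addr, len);

                if (nr < 0)
                        return done ? done : nr;
                done += nr;
                iov_iter_advance(iter, nr);
                if (nr != len)
                        break;          /* short transfer, stop early */
        }
        return done;
}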
index 9430128..1b8e305 100644 (file)
@@ -1085,6 +1085,10 @@ struct nft_chain {
 };
 
 int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
+int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+                        const struct nft_set_iter *iter,
+                        struct nft_set_elem *elem);
+int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
 
 enum nft_chain_types {
        NFT_CHAIN_T_DEFAULT = 0,
index 1fb5f31..c88bb30 100644 (file)
@@ -890,18 +890,14 @@ config CC_IMPLICIT_FALLTHROUGH
        default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-11,12 array-bounds globally.
-# We may want to target only particular configurations some day.
+# Currently, disable gcc-11+ array-bounds globally.
+# It's still broken in gcc-13, so no upper bound yet.
 config GCC11_NO_ARRAY_BOUNDS
        def_bool y
 
-config GCC12_NO_ARRAY_BOUNDS
-       def_bool y
-
 config CC_NO_ARRAY_BOUNDS
        bool
-       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
-       default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
+       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
 
 #
 # For architectures that know their GCC __int128 support is sound
index 4040cf0..89e8390 100644 (file)
@@ -184,8 +184,8 @@ static int io_setup_async_msg(struct io_kiocb *req,
                async_msg->msg.msg_name = &async_msg->addr;
        /* if were using fast_iov, set it to the new one */
        if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
-               size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
-               async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
+               size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
+               async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
        }
 
        return -EAGAIN;
index 4c23391..f33ba6f 100644 (file)
@@ -447,26 +447,25 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
        ppos = io_kiocb_ppos(kiocb);
 
        while (iov_iter_count(iter)) {
-               struct iovec iovec;
+               void __user *addr;
+               size_t len;
                ssize_t nr;
 
                if (iter_is_ubuf(iter)) {
-                       iovec.iov_base = iter->ubuf + iter->iov_offset;
-                       iovec.iov_len = iov_iter_count(iter);
+                       addr = iter->ubuf + iter->iov_offset;
+                       len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
-                       iovec = iov_iter_iovec(iter);
+                       addr = iter_iov_addr(iter);
+                       len = iter_iov_len(iter);
                } else {
-                       iovec.iov_base = u64_to_user_ptr(rw->addr);
-                       iovec.iov_len = rw->len;
+                       addr = u64_to_user_ptr(rw->addr);
+                       len = rw->len;
                }
 
-               if (ddir == READ) {
-                       nr = file->f_op->read(file, iovec.iov_base,
-                                             iovec.iov_len, ppos);
-               } else {
-                       nr = file->f_op->write(file, iovec.iov_base,
-                                              iovec.iov_len, ppos);
-               }
+               if (ddir == READ)
+                       nr = file->f_op->read(file, addr, len, ppos);
+               else
+                       nr = file->f_op->write(file, addr, len, ppos);
 
                if (nr < 0) {
                        if (!ret)
@@ -482,7 +481,7 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
                        if (!rw->len)
                                break;
                }
-               if (nr != iovec.iov_len)
+               if (nr != len)
                        break;
        }
 
@@ -503,10 +502,10 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!iovec) {
                unsigned iov_off = 0;
 
-               io->s.iter.iov = io->s.fast_iov;
-               if (iter->iov != fast_iov) {
-                       iov_off = iter->iov - fast_iov;
-                       io->s.iter.iov += iov_off;
+               io->s.iter.__iov = io->s.fast_iov;
+               if (iter->__iov != fast_iov) {
+                       iov_off = iter_iov(iter) - fast_iov;
+                       io->s.iter.__iov += iov_off;
                }
                if (io->s.fast_iov != fast_iov)
                        memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
index d517d13..767e893 100644 (file)
@@ -2967,6 +2967,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                        }
                } else if (opcode == BPF_EXIT) {
                        return -ENOTSUPP;
+               } else if (BPF_SRC(insn->code) == BPF_X) {
+                       if (!(*reg_mask & (dreg | sreg)))
+                               return 0;
+                       /* dreg <cond> sreg
+                        * Both dreg and sreg need precision before
+                        * this insn. If only sreg was marked precise
+                        * before it would be equally necessary to
+                        * propagate it to dreg.
+                        */
+                       *reg_mask |= (sreg | dreg);
+                        /* else dreg <cond> K
+                         * Only dreg still needs precision before
+                         * this insn, so for the K-based conditional
+                         * there is nothing new to be marked.
+                         */
                }
        } else if (class == BPF_LD) {
                if (!(*reg_mask & dreg))
index 0c92f22..ea33231 100644 (file)
@@ -1174,6 +1174,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 fail_pcpu:
        while (i > 0)
                percpu_counter_destroy(&mm->rss_stat[--i]);
+       destroy_context(mm);
 fail_nocontext:
        mm_free_pgd(mm);
 fail_nopgd:
index 495cd87..351de79 100644 (file)
@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
        struct cred *new;
        int retval;
        kuid_t kruid, keuid, ksuid;
+       bool ruid_new, euid_new, suid_new;
 
        kruid = make_kuid(ns, ruid);
        keuid = make_kuid(ns, euid);
@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
        if ((suid != (uid_t) -1) && !uid_valid(ksuid))
                return -EINVAL;
 
+       old = current_cred();
+
+       /* check for no-op */
+       if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
+           (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
+                                   uid_eq(keuid, old->fsuid))) &&
+           (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
+               return 0;
+
+       ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
+                  !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
+       euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
+                  !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
+       suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
+                  !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
+       if ((ruid_new || euid_new || suid_new) &&
+           !ns_capable_setid(old->user_ns, CAP_SETUID))
+               return -EPERM;
+
        new = prepare_creds();
        if (!new)
                return -ENOMEM;
 
-       old = current_cred();
-
-       retval = -EPERM;
-       if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
-               if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
-                   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
-                       goto error;
-               if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
-                   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
-                       goto error;
-               if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
-                   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
-                       goto error;
-       }
-
        if (ruid != (uid_t) -1) {
                new->uid = kruid;
                if (!uid_eq(kruid, old->uid)) {
@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
        struct cred *new;
        int retval;
        kgid_t krgid, kegid, ksgid;
+       bool rgid_new, egid_new, sgid_new;
 
        krgid = make_kgid(ns, rgid);
        kegid = make_kgid(ns, egid);
@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
        if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
                return -EINVAL;
 
+       old = current_cred();
+
+       /* check for no-op */
+       if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
+           (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
+                                   gid_eq(kegid, old->fsgid))) &&
+           (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
+               return 0;
+
+       rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
+                  !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
+       egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
+                  !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
+       sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
+                  !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
+       if ((rgid_new || egid_new || sgid_new) &&
+           !ns_capable_setid(old->user_ns, CAP_SETGID))
+               return -EPERM;
+
        new = prepare_creds();
        if (!new)
                return -ENOMEM;
-       old = current_cred();
-
-       retval = -EPERM;
-       if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
-               if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
-                   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
-                       goto error;
-               if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
-                   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
-                       goto error;
-               if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
-                   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
-                       goto error;
-       }
 
        if (rgid != (gid_t) -1)
                new->gid = krgid;
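The reordering means the capability check is evaluated up front, and only when the request actually introduces an id that is not already one of the caller's real/effective/saved ids, while a request that changes nothing returns 0 before any credential allocation. A hedged user-space illustration of the resulting semantics:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Pure no-op: takes the new early-return path, never reaches
         * prepare_creds(), and succeeds even without CAP_SETUID. */
        if (setresuid(-1, -1, -1) != 0)
                perror("setresuid no-op");

        /* Requesting an id outside the current {ruid, euid, suid} set is
         * still expected to fail with EPERM for an unprivileged task. */
        if (geteuid() != 0 && setresuid(-1, 0, -1) == 0)
                fprintf(stderr, "unexpectedly gained euid 0\n");

        return 0;
}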
index 274014e..967fba1 100644 (file)
@@ -126,13 +126,13 @@ __out:                                                            \
                        iterate_buf(i, n, base, len, off,       \
                                                i->ubuf, (I))   \
                } else if (likely(iter_is_iovec(i))) {          \
-                       const struct iovec *iov = i->iov;       \
+                       const struct iovec *iov = iter_iov(i);  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
-                       i->nr_segs -= iov - i->iov;             \
-                       i->iov = iov;                           \
+                       i->nr_segs -= iov - iter_iov(i);        \
+                       i->__iov = iov;                         \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
@@ -355,7 +355,7 @@ size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
                size_t skip;
 
                size -= count;
-               for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+               for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;
 
@@ -398,7 +398,7 @@ size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
                size_t skip;
 
                size -= count;
-               for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+               for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;
 
@@ -425,7 +425,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
                .nofault = false,
                .user_backed = true,
                .data_source = direction,
-               .iov = iov,
+               .__iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
@@ -876,14 +876,14 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
        i->count -= size;
 
        size += i->iov_offset; // from beginning of current segment
-       for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
+       for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
-       i->nr_segs -= iov - i->iov;
-       i->iov = iov;
+       i->nr_segs -= iov - iter_iov(i);
+       i->__iov = iov;
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -958,12 +958,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
                        unroll -= n;
                }
        } else { /* same logics for iovec and kvec */
-               const struct iovec *iov = i->iov;
+               const struct iovec *iov = iter_iov(i);
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
-                               i->iov = iov;
+                               i->__iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
@@ -980,7 +980,7 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
 {
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
-                       return min(i->count, i->iov->iov_len - i->iov_offset);
+                       return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
@@ -1095,13 +1095,14 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
        unsigned k;
 
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
-               size_t len = i->iov[k].iov_len - skip;
+               const struct iovec *iov = iter_iov(i) + k;
+               size_t len = iov->iov_len - skip;
 
                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
-               if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
+               if ((unsigned long)(iov->iov_base + skip) & addr_mask)
                        return false;
 
                size -= len;
@@ -1194,9 +1195,10 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
        unsigned k;
 
        for (k = 0; k < i->nr_segs; k++, skip = 0) {
-               size_t len = i->iov[k].iov_len - skip;
+               const struct iovec *iov = iter_iov(i) + k;
+               size_t len = iov->iov_len - skip;
                if (len) {
-                       res |= (unsigned long)i->iov[k].iov_base + skip;
+                       res |= (unsigned long)iov->iov_base + skip;
                        if (len > size)
                                len = size;
                        res |= len;
@@ -1273,14 +1275,15 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
                return ~0U;
 
        for (k = 0; k < i->nr_segs; k++) {
-               if (i->iov[k].iov_len) {
-                       unsigned long base = (unsigned long)i->iov[k].iov_base;
+               const struct iovec *iov = iter_iov(i) + k;
+               if (iov->iov_len) {
+                       unsigned long base = (unsigned long)iov->iov_base;
                        if (v) // if not the first one
                                res |= base | v; // this start | previous end
-                       v = base + i->iov[k].iov_len;
-                       if (size <= i->iov[k].iov_len)
+                       v = base + iov->iov_len;
+                       if (size <= iov->iov_len)
                                break;
-                       size -= i->iov[k].iov_len;
+                       size -= iov->iov_len;
                }
        }
        return res;
@@ -1396,13 +1399,14 @@ static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
                return (unsigned long)i->ubuf + i->iov_offset;
 
        for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
-               size_t len = i->iov[k].iov_len - skip;
+               const struct iovec *iov = iter_iov(i) + k;
+               size_t len = iov->iov_len - skip;
 
                if (unlikely(!len))
                        continue;
                if (*size > len)
                        *size = len;
-               return (unsigned long)i->iov[k].iov_base + skip;
+               return (unsigned long)iov->iov_base + skip;
        }
        BUG(); // if it had been empty, we wouldn't get called
 }
@@ -1614,7 +1618,7 @@ static int iov_npages(const struct iov_iter *i, int maxpages)
        const struct iovec *p;
        int npages = 0;
 
-       for (p = i->iov; size; skip = 0, p++) {
+       for (p = iter_iov(i); size; skip = 0, p++) {
                unsigned offs = offset_in_page(p->iov_base + skip);
                size_t len = min(p->iov_len - skip, size);
 
@@ -1691,14 +1695,14 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
                                    flags);
        else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
                /* iovec and kvec have identical layout */
-               return new->iov = kmemdup(new->iov,
+               return new->__iov = kmemdup(new->__iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
        return NULL;
 }
 EXPORT_SYMBOL(dup_iter);
 
-static int copy_compat_iovec_from_user(struct iovec *iov,
+static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
                const struct iovec __user *uvec, unsigned long nr_segs)
 {
        const struct compat_iovec __user *uiov =
@@ -1731,18 +1735,35 @@ uaccess_end:
 }
 
 static int copy_iovec_from_user(struct iovec *iov,
-               const struct iovec __user *uvec, unsigned long nr_segs)
+               const struct iovec __user *uiov, unsigned long nr_segs)
 {
-       unsigned long seg;
+       int ret = -EFAULT;
 
-       if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
+       if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
                return -EFAULT;
-       for (seg = 0; seg < nr_segs; seg++) {
-               if ((ssize_t)iov[seg].iov_len < 0)
-                       return -EINVAL;
-       }
 
-       return 0;
+       do {
+               void __user *buf;
+               ssize_t len;
+
+               unsafe_get_user(len, &uiov->iov_len, uaccess_end);
+               unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
+
+               /* check for size_t not fitting in ssize_t .. */
+               if (unlikely(len < 0)) {
+                       ret = -EINVAL;
+                       goto uaccess_end;
+               }
+               iov->iov_base = buf;
+               iov->iov_len = len;
+
+               uiov++; iov++;
+       } while (--nr_segs);
+
+       ret = 0;
+uaccess_end:
+       user_access_end();
+       return ret;
 }
 
 struct iovec *iovec_from_user(const struct iovec __user *uvec,
@@ -1767,7 +1788,7 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
                        return ERR_PTR(-ENOMEM);
        }
 
-       if (compat)
+       if (unlikely(compat))
                ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
        else
                ret = copy_iovec_from_user(iov, uvec, nr_segs);
@@ -1780,6 +1801,30 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
        return iov;
 }
 
+/*
+ * Single segment iovec supplied by the user, import it as ITER_UBUF.
+ */
+static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+                                  struct iovec **iovp, struct iov_iter *i,
+                                  bool compat)
+{
+       struct iovec *iov = *iovp;
+       ssize_t ret;
+
+       if (compat)
+               ret = copy_compat_iovec_from_user(iov, uvec, 1);
+       else
+               ret = copy_iovec_from_user(iov, uvec, 1);
+       if (unlikely(ret))
+               return ret;
+
+       ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+       if (unlikely(ret))
+               return ret;
+       *iovp = NULL;
+       return i->count;
+}
+
 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
                 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                 struct iov_iter *i, bool compat)
@@ -1788,6 +1833,9 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
        unsigned long seg;
        struct iovec *iov;
 
+       if (nr_segs == 1)
+               return __import_iovec_ubuf(type, uvec, iovp, i, compat);
+
        iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
        if (IS_ERR(iov)) {
                *iovp = NULL;
@@ -1866,9 +1914,7 @@ int import_single_range(int rw, void __user *buf, size_t len,
        if (unlikely(!access_ok(buf, len)))
                return -EFAULT;
 
-       iov->iov_base = buf;
-       iov->iov_len = len;
-       iov_iter_init(i, rw, iov, 1, len);
+       iov_iter_ubuf(i, rw, buf, len);
        return 0;
 }
 EXPORT_SYMBOL(import_single_range);
@@ -1918,7 +1964,7 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
        if (iov_iter_is_bvec(i))
                i->bvec -= state->nr_segs - i->nr_segs;
        else
-               i->iov -= state->nr_segs - i->nr_segs;
+               i->__iov -= state->nr_segs - i->nr_segs;
        i->nr_segs = state->nr_segs;
 }
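Taken together, every single-segment import now produces an ITER_UBUF iterator: import_single_range() calls iov_iter_ubuf() directly, and __import_iovec() short-circuits through __import_iovec_ubuf() when nr_segs == 1. A hedged kernel-side sketch of what a caller can now assume:

/* Illustrative only, not from the series: importing exactly one iovec is
 * expected to yield a ubuf-backed iterator after this change. */
static int import_one_segment(const struct iovec __user *uvec,
                              struct iovec **iovp, struct iov_iter *iter)
{
        ssize_t ret = import_iovec(ITER_DEST, uvec, 1, UIO_FASTIOV, iovp, iter);

        if (ret < 0)
                return ret;
        WARN_ON_ONCE(!iter_is_ubuf(iter));      /* single segment => ITER_UBUF */
        return 0;
}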
 
index db60edb..1281a40 100644 (file)
@@ -1303,26 +1303,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
        node = mas->alloc;
        node->request_count = 0;
        while (requested) {
-               max_req = MAPLE_ALLOC_SLOTS;
-               if (node->node_count) {
-                       unsigned int offset = node->node_count;
-
-                       slots = (void **)&node->slot[offset];
-                       max_req -= offset;
-               } else {
-                       slots = (void **)&node->slot;
-               }
-
+               max_req = MAPLE_ALLOC_SLOTS - node->node_count;
+               slots = (void **)&node->slot[node->node_count];
                max_req = min(requested, max_req);
                count = mt_alloc_bulk(gfp, max_req, slots);
                if (!count)
                        goto nomem_bulk;
 
+               if (node->node_count == 0) {
+                       node->slot[0]->node_count = 0;
+                       node->slot[0]->request_count = 0;
+               }
+
                node->node_count += count;
                allocated += count;
                node = node->slot[0];
-               node->node_count = 0;
-               node->request_count = 0;
                requested -= count;
        }
        mas->alloc->total = allocated;
@@ -4970,7 +4965,8 @@ not_found:
  * Return: True if found in a leaf, false otherwise.
  *
  */
-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
+               unsigned long *gap_min, unsigned long *gap_max)
 {
        enum maple_type type = mte_node_type(mas->node);
        struct maple_node *node = mas_mn(mas);
@@ -5035,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 
        if (unlikely(ma_is_leaf(type))) {
                mas->offset = offset;
-               mas->min = min;
-               mas->max = min + gap - 1;
+               *gap_min = min;
+               *gap_max = min + gap - 1;
                return true;
        }
 
@@ -5060,10 +5056,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
 {
        enum maple_type type = mte_node_type(mas->node);
        unsigned long pivot, min, gap = 0;
-       unsigned char offset;
-       unsigned long *gaps;
-       unsigned long *pivots = ma_pivots(mas_mn(mas), type);
-       void __rcu **slots = ma_slots(mas_mn(mas), type);
+       unsigned char offset, data_end;
+       unsigned long *gaps, *pivots;
+       void __rcu **slots;
+       struct maple_node *node;
        bool found = false;
 
        if (ma_is_dense(type)) {
@@ -5071,13 +5067,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
                return true;
        }
 
-       gaps = ma_gaps(mte_to_node(mas->node), type);
+       node = mas_mn(mas);
+       pivots = ma_pivots(node, type);
+       slots = ma_slots(node, type);
+       gaps = ma_gaps(node, type);
        offset = mas->offset;
        min = mas_safe_min(mas, pivots, offset);
-       for (; offset < mt_slots[type]; offset++) {
-               pivot = mas_safe_pivot(mas, pivots, offset, type);
-               if (offset && !pivot)
-                       break;
+       data_end = ma_data_end(node, type, pivots, mas->max);
+       for (; offset <= data_end; offset++) {
+               pivot = mas_logical_pivot(mas, pivots, offset, type);
 
                /* Not within lower bounds */
                if (mas->index > pivot)
@@ -5312,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
        unsigned long *pivots;
        enum maple_type mt;
 
+       if (min >= max)
+               return -EINVAL;
+
        if (mas_is_start(mas))
                mas_start(mas);
        else if (mas->offset >= 2)
@@ -5366,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 {
        struct maple_enode *last = mas->node;
 
+       if (min >= max)
+               return -EINVAL;
+
        if (mas_is_start(mas)) {
                mas_start(mas);
                mas->offset = mas_data_end(mas);
@@ -5385,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
        mas->index = min;
        mas->last = max;
 
-       while (!mas_rev_awalk(mas, size)) {
+       while (!mas_rev_awalk(mas, size, &min, &max)) {
                if (last == mas->node) {
                        if (!mas_rewind_node(mas))
                                return -EBUSY;
@@ -5400,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
        if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
                return -EBUSY;
 
-       /*
-        * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
-        * the maximum is outside the window we are searching, then use the last
-        * location in the search.
-        * mas->max and mas->min is the range of the gap.
-        * mas->index and mas->last are currently set to the search range.
-        */
-
        /* Trim the upper limit to the max. */
-       if (mas->max <= mas->last)
-               mas->last = mas->max;
+       if (max <= mas->last)
+               mas->last = max;
 
        mas->index = mas->last - size + 1;
        return 0;
index a53b936..30d2d03 100644 (file)
@@ -507,6 +507,15 @@ static LIST_HEAD(offline_cgwbs);
 static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
 static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
 
+static void cgwb_free_rcu(struct rcu_head *rcu_head)
+{
+       struct bdi_writeback *wb = container_of(rcu_head,
+                       struct bdi_writeback, rcu);
+
+       percpu_ref_exit(&wb->refcnt);
+       kfree(wb);
+}
+
 static void cgwb_release_workfn(struct work_struct *work)
 {
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -529,11 +538,10 @@ static void cgwb_release_workfn(struct work_struct *work)
        list_del(&wb->offline_node);
        spin_unlock_irq(&cgwb_lock);
 
-       percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        bdi_put(bdi);
        WARN_ON_ONCE(!list_empty(&wb->b_attached));
-       kfree_rcu(wb, rcu);
+       call_rcu(&wb->rcu, cgwb_free_rcu);
 }
 
 static void cgwb_release(struct percpu_ref *refcnt)
index 032fb0e..3fae2d2 100644 (file)
@@ -1838,10 +1838,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
                struct page *page = pfn_swap_entry_to_page(entry);
+               pmd_t newpmd;
 
                VM_BUG_ON(!is_pmd_migration_entry(*pmd));
                if (is_writable_migration_entry(entry)) {
-                       pmd_t newpmd;
                        /*
                         * A protection check is difficult so
                         * just be safe and disable write
@@ -1855,8 +1855,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                newpmd = pmd_swp_mksoft_dirty(newpmd);
                        if (pmd_swp_uffd_wp(*pmd))
                                newpmd = pmd_swp_mkuffd_wp(newpmd);
-                       set_pmd_at(mm, addr, pmd, newpmd);
+               } else {
+                       newpmd = *pmd;
                }
+
+               if (uffd_wp)
+                       newpmd = pmd_swp_mkuffd_wp(newpmd);
+               else if (uffd_wp_resolve)
+                       newpmd = pmd_swp_clear_uffd_wp(newpmd);
+               if (!pmd_same(*pmd, newpmd))
+                       set_pmd_at(mm, addr, pmd, newpmd);
                goto unlock;
        }
 #endif
@@ -2657,9 +2665,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
        is_hzp = is_huge_zero_page(&folio->page);
-       VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
-       if (is_hzp)
+       if (is_hzp) {
+               pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                return -EBUSY;
+       }
 
        if (folio_test_writeback(folio))
                return -EBUSY;
@@ -3251,6 +3260,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        pmdswp = swp_entry_to_pmd(entry);
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+       if (pmd_uffd_wp(pmdval))
+               pmdswp = pmd_swp_mkuffd_wp(pmdswp);
        set_pmd_at(mm, address, pvmw->pmd, pmdswp);
        page_remove_rmap(page, vma, true);
        put_page(page);
index 92e6f56..0ec69b9 100644 (file)
@@ -572,6 +572,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        result = SCAN_PTE_NON_PRESENT;
                        goto out;
                }
+               if (pte_uffd_wp(pteval)) {
+                       result = SCAN_PTE_UFFD_WP;
+                       goto out;
+               }
                page = vm_normal_page(vma, address, pteval);
                if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
                        result = SCAN_PAGE_NULL;
index 3807502..ec0da72 100644 (file)
@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-                             phys_addr_t phys_addr, pgprot_t prot,
-                             unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+                            phys_addr_t phys_addr, pgprot_t prot,
+                            unsigned int page_shift)
 {
        gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
        struct page *shadow, *origin;
        unsigned long off = 0;
-       int nr;
+       int nr, err = 0, clean = 0, mapped;
 
        if (!kmsan_enabled || kmsan_in_runtime())
-               return;
+               return 0;
 
        nr = (end - start) / PAGE_SIZE;
        kmsan_enter_runtime();
-       for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+       for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
                shadow = alloc_pages(gfp_mask, 1);
                origin = alloc_pages(gfp_mask, 1);
-               __vmap_pages_range_noflush(
+               if (!shadow || !origin) {
+                       err = -ENOMEM;
+                       goto ret;
+               }
+               mapped = __vmap_pages_range_noflush(
                        vmalloc_shadow(start + off),
                        vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
                        PAGE_SHIFT);
-               __vmap_pages_range_noflush(
+               if (mapped) {
+                       err = mapped;
+                       goto ret;
+               }
+               shadow = NULL;
+               mapped = __vmap_pages_range_noflush(
                        vmalloc_origin(start + off),
                        vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
                        PAGE_SHIFT);
+               if (mapped) {
+                       __vunmap_range_noflush(
+                               vmalloc_shadow(start + off),
+                               vmalloc_shadow(start + off + PAGE_SIZE));
+                       err = mapped;
+                       goto ret;
+               }
+               origin = NULL;
+       }
+       /* Page mapping loop finished normally, nothing to clean up. */
+       clean = 0;
+
+ret:
+       if (clean > 0) {
+               /*
+                * Something went wrong. Clean up shadow/origin pages allocated
+                * on the last loop iteration, then delete mappings created
+                * during the previous iterations.
+                */
+               if (shadow)
+                       __free_pages(shadow, 1);
+               if (origin)
+                       __free_pages(origin, 1);
+               __vunmap_range_noflush(
+                       vmalloc_shadow(start),
+                       vmalloc_shadow(start + clean * PAGE_SIZE));
+               __vunmap_range_noflush(
+                       vmalloc_origin(start),
+                       vmalloc_origin(start + clean * PAGE_SIZE));
        }
        flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
        flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
        kmsan_leave_runtime();
+       return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
index a787c04..b8bb95e 100644 (file)
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
        kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                   pgprot_t prot, struct page **pages,
-                                   unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                  pgprot_t prot, struct page **pages,
+                                  unsigned int page_shift)
 {
        unsigned long shadow_start, origin_start, shadow_end, origin_end;
        struct page **s_pages, **o_pages;
-       int nr, mapped;
+       int nr, mapped, err = 0;
 
        if (!kmsan_enabled)
-               return;
+               return 0;
 
        shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
        shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
        if (!shadow_start)
-               return;
+               return 0;
 
        nr = (end - start) / PAGE_SIZE;
        s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
        o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-       if (!s_pages || !o_pages)
+       if (!s_pages || !o_pages) {
+               err = -ENOMEM;
                goto ret;
+       }
        for (int i = 0; i < nr; i++) {
                s_pages[i] = shadow_page_for(pages[i]);
                o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
        kmsan_enter_runtime();
        mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
                                            s_pages, page_shift);
-       KMSAN_WARN_ON(mapped);
+       if (mapped) {
+               err = mapped;
+               goto ret;
+       }
        mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
                                            o_pages, page_shift);
-       KMSAN_WARN_ON(mapped);
+       if (mapped) {
+               err = mapped;
+               goto ret;
+       }
        kmsan_leave_runtime();
        flush_tlb_kernel_range(shadow_start, shadow_end);
        flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
        kfree(s_pages);
        kfree(o_pages);
+       return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
index 340125d..9f389c5 100644 (file)
@@ -1456,7 +1456,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
                size_t, vlen, int, behavior, unsigned int, flags)
 {
        ssize_t ret;
-       struct iovec iovstack[UIO_FASTIOV], iovec;
+       struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        struct task_struct *task;
@@ -1503,12 +1503,11 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
        total_len = iov_iter_count(&iter);
 
        while (iov_iter_count(&iter)) {
-               iovec = iov_iter_iovec(&iter);
-               ret = do_madvise(mm, (unsigned long)iovec.iov_base,
-                                       iovec.iov_len, behavior);
+               ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
+                                       iter_iov_len(&iter), behavior);
                if (ret < 0)
                        break;
-               iov_iter_advance(&iter, iovec.iov_len);
+               iov_iter_advance(&iter, iter_iov_len(&iter));
        }
 
        ret = (total_len - iov_iter_count(&iter)) ? : ret;
index a256a24..2068b59 100644 (file)
@@ -790,61 +790,50 @@ static int vma_replace_policy(struct vm_area_struct *vma,
        return err;
 }
 
-/* Step 2: apply policy to a range and do splits. */
-static int mbind_range(struct mm_struct *mm, unsigned long start,
-                      unsigned long end, struct mempolicy *new_pol)
+/* Split or merge the VMA (if required) and apply the new policy */
+static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
+               struct vm_area_struct **prev, unsigned long start,
+               unsigned long end, struct mempolicy *new_pol)
 {
-       VMA_ITERATOR(vmi, mm, start);
-       struct vm_area_struct *prev;
-       struct vm_area_struct *vma;
-       int err = 0;
+       struct vm_area_struct *merged;
+       unsigned long vmstart, vmend;
        pgoff_t pgoff;
+       int err;
 
-       prev = vma_prev(&vmi);
-       vma = vma_find(&vmi, end);
-       if (WARN_ON(!vma))
+       vmend = min(end, vma->vm_end);
+       if (start > vma->vm_start) {
+               *prev = vma;
+               vmstart = start;
+       } else {
+               vmstart = vma->vm_start;
+       }
+
+       if (mpol_equal(vma_policy(vma), new_pol))
                return 0;
 
-       if (start > vma->vm_start)
-               prev = vma;
-
-       do {
-               unsigned long vmstart = max(start, vma->vm_start);
-               unsigned long vmend = min(end, vma->vm_end);
-
-               if (mpol_equal(vma_policy(vma), new_pol))
-                       goto next;
-
-               pgoff = vma->vm_pgoff +
-                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
-               prev = vma_merge(&vmi, mm, prev, vmstart, vmend, vma->vm_flags,
-                                vma->anon_vma, vma->vm_file, pgoff,
-                                new_pol, vma->vm_userfaultfd_ctx,
-                                anon_vma_name(vma));
-               if (prev) {
-                       vma = prev;
-                       goto replace;
-               }
-               if (vma->vm_start != vmstart) {
-                       err = split_vma(&vmi, vma, vmstart, 1);
-                       if (err)
-                               goto out;
-               }
-               if (vma->vm_end != vmend) {
-                       err = split_vma(&vmi, vma, vmend, 0);
-                       if (err)
-                               goto out;
-               }
-replace:
-               err = vma_replace_policy(vma, new_pol);
+       pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+       merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
+                        vma->anon_vma, vma->vm_file, pgoff, new_pol,
+                        vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+       if (merged) {
+               *prev = merged;
+               return vma_replace_policy(merged, new_pol);
+       }
+
+       if (vma->vm_start != vmstart) {
+               err = split_vma(vmi, vma, vmstart, 1);
                if (err)
-                       goto out;
-next:
-               prev = vma;
-       } for_each_vma_range(vmi, vma, end);
+                       return err;
+       }
 
-out:
-       return err;
+       if (vma->vm_end != vmend) {
+               err = split_vma(vmi, vma, vmend, 0);
+               if (err)
+                       return err;
+       }
+
+       *prev = vma;
+       return vma_replace_policy(vma, new_pol);
 }
 
 /* Set the process memory policy */
@@ -1259,6 +1248,8 @@ static long do_mbind(unsigned long start, unsigned long len,
                     nodemask_t *nmask, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
+       struct vma_iterator vmi;
        struct mempolicy *new;
        unsigned long end;
        int err;
@@ -1328,7 +1319,13 @@ static long do_mbind(unsigned long start, unsigned long len,
                goto up_out;
        }
 
-       err = mbind_range(mm, start, end, new);
+       vma_iter_init(&vmi, mm, start);
+       prev = vma_prev(&vmi);
+       for_each_vma_range(vmi, vma, end) {
+               err = mbind_range(&vmi, vma, &prev, start, end, new);
+               if (err)
+                       break;
+       }
 
        if (!err) {
                int nr_failed = 0;
@@ -1489,10 +1486,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                unsigned long, home_node, unsigned long, flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mempolicy *new, *old;
-       unsigned long vmstart;
-       unsigned long vmend;
        unsigned long end;
        int err = -ENOENT;
        VMA_ITERATOR(vmi, mm, start);
@@ -1521,6 +1516,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
        if (end == start)
                return 0;
        mmap_write_lock(mm);
+       prev = vma_prev(&vmi);
        for_each_vma_range(vmi, vma, end) {
                /*
                 * If any vma in the range got policy other than MPOL_BIND
@@ -1541,9 +1537,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                }
 
                new->home_node = home_node;
-               vmstart = max(start, vma->vm_start);
-               vmend   = min(end, vma->vm_end);
-               err = mbind_range(mm, vmstart, vmend, new);
+               err = mbind_range(&vmi, vma, &prev, start, end, new);
                mpol_put(new);
                if (err)
                        break;
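
mbind_range() now works on a single VMA handed in by the caller's vma_iterator, with do_mbind() and set_mempolicy_home_node() driving the for_each_vma_range() walk themselves. As a reminder of what exercises this path, a small user-space sketch; it assumes libnuma's <numaif.h> (link with -lnuma) and that node 0 exists on the machine.

#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 1UL << 20;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	unsigned long nodemask = 1UL;	/* bind to node 0 */

	/* Lands in do_mbind(), which walks the covered VMAs with a
	 * vma_iterator and calls mbind_range() once per VMA. */
	if (mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0)) {
		perror("mbind");
		return 1;
	}
	puts("policy applied");
	return 0;
}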
index ff68a67..d5475fb 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1518,7 +1518,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
  */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap;
+       unsigned long length, gap, low_limit;
+       struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
@@ -1527,12 +1528,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
        if (length < info->length)
                return -ENOMEM;
 
-       if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
-                                 length))
+       low_limit = info->low_limit;
+retry:
+       if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
                return -ENOMEM;
 
        gap = mas.index;
        gap += (info->align_offset - gap) & info->align_mask;
+       tmp = mas_next(&mas, ULONG_MAX);
+       if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) < gap + length - 1) {
+                       low_limit = tmp->vm_end;
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       } else {
+               tmp = mas_prev(&mas, 0);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       low_limit = vm_end_gap(tmp);
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       }
+
        return gap;
 }
 
@@ -1548,7 +1566,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
  */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap;
+       unsigned long length, gap, high_limit, gap_end;
+       struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
        /* Adjust search length to account for worst case alignment overhead */
@@ -1556,12 +1575,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        if (length < info->length)
                return -ENOMEM;
 
-       if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
+       high_limit = info->high_limit;
+retry:
+       if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
                                length))
                return -ENOMEM;
 
        gap = mas.last + 1 - info->length;
        gap -= (gap - info->align_offset) & info->align_mask;
+       gap_end = mas.last;
+       tmp = mas_next(&mas, ULONG_MAX);
+       if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) <= gap_end) {
+                       high_limit = vm_start_gap(tmp);
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       } else {
+               tmp = mas_prev(&mas, 0);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       high_limit = tmp->vm_start;
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       }
+
        return gap;
 }
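
Both directions of the search above now follow the same shape: take a candidate gap from the maple tree, check it against the neighbouring VMAs' guard regions via vm_start_gap()/vm_end_gap(), and if it collides, tighten the limit and retry. A self-contained user-space model of that retry-with-tightened-limit idea over a plain sorted array; find_gap(), struct range and the one-page guard are hypothetical illustration, not the kernel's data structures.

#include <stdio.h>

struct range { unsigned long start, end; };	/* occupied [start, end) */

/* Find the lowest gap of 'len' bytes at or above 'low'; whenever a candidate
 * would run into the guard page modelled below the next range, raise 'low'
 * past that range and search again, mirroring the retry above. */
static unsigned long find_gap(const struct range *r, int n,
			      unsigned long low, unsigned long len)
{
retry:
	for (int i = 0; i <= n; i++) {
		unsigned long gap_start = (i == 0) ? low : r[i - 1].end;
		unsigned long gap_end = (i == n) ? ~0UL : r[i].start;

		if (gap_start < low)
			gap_start = low;
		if (gap_end < gap_start || gap_end - gap_start < len)
			continue;
		if (i < n && r[i].start - 4096 < gap_start + len) {
			low = r[i].end;		/* tighten and retry */
			goto retry;
		}
		return gap_start;
	}
	return 0;	/* no gap, stands in for -ENOMEM */
}

int main(void)
{
	struct range vmas[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };
	printf("gap at %#lx\n", find_gap(vmas, 2, 0x2000, 0x2000));
	return 0;
}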
 
index 13e84d8..36351a0 100644 (file)
@@ -838,7 +838,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
        }
        tlb_finish_mmu(&tlb);
 
-       if (vma_iter_end(&vmi) < end)
+       if (!error && vma_iter_end(&vmi) < end)
                error = -ENOMEM;
 
 out:
index 7136c36..8e39705 100644 (file)
@@ -6632,7 +6632,21 @@ static void __build_all_zonelists(void *data)
        int nid;
        int __maybe_unused cpu;
        pg_data_t *self = data;
+       unsigned long flags;
 
+       /*
+        * Explicitly disable this CPU's interrupts before taking seqlock
+        * to prevent any IRQ handler from calling into the page allocator
+        * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+        */
+       local_irq_save(flags);
+       /*
+        * Explicitly disable this CPU's synchronous printk() before taking
+        * seqlock to prevent any printk() from trying to hold port->lock, for
+        * tty_insert_flip_string_and_push_buffer() on other CPU might be
+        * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
+        */
+       printk_deferred_enter();
        write_seqlock(&zonelist_update_seq);
 
 #ifdef CONFIG_NUMA
@@ -6671,6 +6685,8 @@ static void __build_all_zonelists(void *data)
        }
 
        write_sequnlock(&zonelist_update_seq);
+       printk_deferred_exit();
+       local_irq_restore(flags);
 }
 
 static noinline void __init
@@ -9450,6 +9466,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
 
                if (PageReserved(page))
                        return false;
+
+               if (PageHuge(page))
+                       return false;
        }
        return true;
 }
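
The __build_all_zonelists() hunks above establish a strict nesting: IRQs off first, then printk deferral, then the seqlock, with the unlocks in reverse order, so that neither an IRQ-context GFP_ATOMIC allocation nor a synchronous printk() on this CPU can spin on zonelist_update_seq while it is held. A minimal sketch of that ordering with a hypothetical seqlock-protected structure (example_lock and example_update() are not kernel symbols):

#include <linux/seqlock.h>
#include <linux/printk.h>
#include <linux/irqflags.h>

static DEFINE_SEQLOCK(example_lock);

static void example_update(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* no IRQ handler can recurse into readers on this CPU */
	printk_deferred_enter();	/* printk() on this CPU defers, avoiding port->lock */
	write_seqlock(&example_lock);

	/* ... update the protected data ... */

	write_sequnlock(&example_lock);
	printk_deferred_exit();
	local_irq_restore(flags);
}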
index 57cb01b..423199e 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -222,7 +222,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        folios_put(fbatch->folios, folio_batch_count(fbatch));
-       folio_batch_init(fbatch);
+       folio_batch_reinit(fbatch);
 }
 
 static void folio_batch_add_and_move(struct folio_batch *fbatch,
index a500720..31ff782 100644 (file)
@@ -313,8 +313,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
                                 ioremap_max_page_shift);
        flush_cache_vmap(addr, end);
        if (!err)
-               kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-                                        ioremap_max_page_shift);
+               err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+                                              ioremap_max_page_shift);
        return err;
 }
 
@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-       kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+       int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+                                                page_shift);
+
+       if (ret)
+               return ret;
        return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
 
index 638a4d5..4bc6761 100644 (file)
@@ -868,12 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
 {
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
-       if (nf_bridge && !nf_bridge->in_prerouting &&
-           !netif_is_l3_master(skb->dev) &&
-           !netif_is_l3_slave(skb->dev)) {
-               nf_bridge_info_free(skb);
-               state->okfn(state->net, state->sk, skb);
-               return NF_STOLEN;
+       if (nf_bridge) {
+               if (nf_bridge->sabotage_in_done)
+                       return NF_ACCEPT;
+
+               if (!nf_bridge->in_prerouting &&
+                   !netif_is_l3_master(skb->dev) &&
+                   !netif_is_l3_slave(skb->dev)) {
+                       nf_bridge->sabotage_in_done = 1;
+                       state->okfn(state->net, state->sk, skb);
+                       return NF_STOLEN;
+               }
        }
 
        return NF_ACCEPT;
index de18e9c..ba95c4d 100644 (file)
@@ -148,6 +148,17 @@ br_switchdev_fdb_notify(struct net_bridge *br,
        if (test_bit(BR_FDB_LOCKED, &fdb->flags))
                return;
 
+       /* Entries with these flags were created using ndm_state == NUD_REACHABLE,
+        * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
+        * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
+        * Drivers don't know how to deal with these, so don't notify them to
+        * avoid confusing them.
+        */
+       if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
+           !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+           !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+               return;
+
        br_switchdev_fdb_populate(br, &item, fdb, NULL);
 
        switch (type) {
index 488aec9..d1876f1 100644 (file)
@@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
                         unsigned char cmpre)
 {
-       return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
+       return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) +
+               IPV6_PFXTAIL_LEN(cmpre);
 }
 
 void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
index 06c5872..b998e9d 100644 (file)
@@ -2315,7 +2315,26 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                              unsigned int flags)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
-       bool need_push, dispose_it;
+       bool dispose_it, need_push = false;
+
+       /* If the first subflow moved to a close state before accept, e.g. due
+        * to an incoming reset, mptcp either:
+        * - if either the subflow or the msk are dead, destroy the context
+        *   (the subflow socket is deleted by inet_child_forget) and the msk
+        * - otherwise do nothing at the moment and take action at accept and/or
+        *   listener shutdown - user-space must be able to accept() the closed
+        *   socket.
+        */
+       if (msk->in_accept_queue && msk->first == ssk) {
+               if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
+                       return;
+
+               /* ensure later check in mptcp_worker() will dispose the msk */
+               sock_set_flag(sk, SOCK_DEAD);
+               lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+               mptcp_subflow_drop_ctx(ssk);
+               goto out_release;
+       }
 
        dispose_it = !msk->subflow || ssk != msk->subflow->sk;
        if (dispose_it)
@@ -2351,28 +2370,22 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
        if (!inet_csk(ssk)->icsk_ulp_ops) {
                WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
                kfree_rcu(subflow, rcu);
-       } else if (msk->in_accept_queue && msk->first == ssk) {
-               /* if the first subflow moved to a close state, e.g. due to
-                * incoming reset and we reach here before inet_child_forget()
-                * the TCP stack could later try to close it via
-                * inet_csk_listen_stop(), or deliver it to the user space via
-                * accept().
-                * We can't delete the subflow - or risk a double free - nor let
-                * the msk survive - or will be leaked in the non accept scenario:
-                * fallback and let TCP cope with the subflow cleanup.
-                */
-               WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
-               mptcp_subflow_drop_ctx(ssk);
        } else {
                /* otherwise tcp will dispose of the ssk and subflow ctx */
-               if (ssk->sk_state == TCP_LISTEN)
+               if (ssk->sk_state == TCP_LISTEN) {
+                       tcp_set_state(ssk, TCP_CLOSE);
+                       mptcp_subflow_queue_clean(sk, ssk);
+                       inet_csk_listen_stop(ssk);
                        mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
+               }
 
                __tcp_close(ssk, 0);
 
                /* close acquired an extra ref */
                __sock_put(ssk);
        }
+
+out_release:
        release_sock(ssk);
 
        sock_put(ssk);
@@ -2427,21 +2440,14 @@ static void __mptcp_close_subflow(struct sock *sk)
                mptcp_close_ssk(sk, ssk, subflow);
        }
 
-       /* if the MPC subflow has been closed before the msk is accepted,
-        * msk will never be accept-ed, close it now
-        */
-       if (!msk->first && msk->in_accept_queue) {
-               sock_set_flag(sk, SOCK_DEAD);
-               inet_sk_state_store(sk, TCP_CLOSE);
-       }
 }
 
-static bool mptcp_check_close_timeout(const struct sock *sk)
+static bool mptcp_should_close(const struct sock *sk)
 {
        s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
        struct mptcp_subflow_context *subflow;
 
-       if (delta >= TCP_TIMEWAIT_LEN)
+       if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
                return true;
 
        /* if all subflows are in closed status don't bother with additional
@@ -2649,7 +2655,7 @@ static void mptcp_worker(struct work_struct *work)
         * even if it is orphaned and in FIN_WAIT2 state
         */
        if (sock_flag(sk, SOCK_DEAD)) {
-               if (mptcp_check_close_timeout(sk)) {
+               if (mptcp_should_close(sk)) {
                        inet_sk_state_store(sk, TCP_CLOSE);
                        mptcp_do_fastclose(sk);
                }
@@ -2895,6 +2901,14 @@ static void __mptcp_destroy_sock(struct sock *sk)
        sock_put(sk);
 }
 
+void __mptcp_unaccepted_force_close(struct sock *sk)
+{
+       sock_set_flag(sk, SOCK_DEAD);
+       inet_sk_state_store(sk, TCP_CLOSE);
+       mptcp_do_fastclose(sk);
+       __mptcp_destroy_sock(sk);
+}
+
 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
 {
        /* Concurrent splices from sk_receive_queue into receive_queue will
@@ -3733,6 +3747,18 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
                        if (!ssk->sk_socket)
                                mptcp_sock_graft(ssk, newsock);
                }
+
+               /* Do late cleanup for the first subflow as necessary. Also
+                * deal with bad peers not doing a complete shutdown.
+                */
+               if (msk->first &&
+                   unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
+                       __mptcp_close_ssk(newsk, msk->first,
+                                         mptcp_subflow_ctx(msk->first), 0);
+                       if (unlikely(list_empty(&msk->conn_list)))
+                               inet_sk_state_store(newsk, TCP_CLOSE);
+               }
+
                release_sock(newsk);
        }
 
index 339a6f0..d6469b6 100644 (file)
@@ -629,10 +629,12 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                     struct mptcp_subflow_context *subflow);
 void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
 void mptcp_cancel_work(struct sock *sk);
+void __mptcp_unaccepted_force_close(struct sock *sk);
 void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
 
 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
index d345888..281c1cc 100644 (file)
@@ -723,9 +723,12 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
        if (!ctx)
                return;
 
-       subflow_ulp_fallback(ssk, ctx);
-       if (ctx->conn)
-               sock_put(ctx->conn);
+       list_del(&mptcp_subflow_ctx(ssk)->node);
+       if (inet_csk(ssk)->icsk_ulp_ops) {
+               subflow_ulp_fallback(ssk, ctx);
+               if (ctx->conn)
+                       sock_put(ctx->conn);
+       }
 
        kfree_rcu(ctx, rcu);
 }
@@ -1819,6 +1822,77 @@ static void subflow_state_change(struct sock *sk)
        }
 }
 
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+{
+       struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+       struct mptcp_sock *msk, *next, *head = NULL;
+       struct request_sock *req;
+       struct sock *sk;
+
+       /* build a list of all unaccepted mptcp sockets */
+       spin_lock_bh(&queue->rskq_lock);
+       for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+               struct mptcp_subflow_context *subflow;
+               struct sock *ssk = req->sk;
+
+               if (!sk_is_mptcp(ssk))
+                       continue;
+
+               subflow = mptcp_subflow_ctx(ssk);
+               if (!subflow || !subflow->conn)
+                       continue;
+
+               /* skip if already in list */
+               sk = subflow->conn;
+               msk = mptcp_sk(sk);
+               if (msk->dl_next || msk == head)
+                       continue;
+
+               sock_hold(sk);
+               msk->dl_next = head;
+               head = msk;
+       }
+       spin_unlock_bh(&queue->rskq_lock);
+       if (!head)
+               return;
+
+       /* can't acquire the msk socket lock under the subflow one,
+        * or will cause ABBA deadlock
+        */
+       release_sock(listener_ssk);
+
+       for (msk = head; msk; msk = next) {
+               sk = (struct sock *)msk;
+
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+               next = msk->dl_next;
+               msk->dl_next = NULL;
+
+               __mptcp_unaccepted_force_close(sk);
+               release_sock(sk);
+
+               /* lockdep will report a false positive ABBA deadlock
+                * between cancel_work_sync and the listener socket.
+                * The involved locks belong to different sockets WRT
+                * the existing AB chain.
+                * Using a per socket key is problematic as key
+                * deregistration requires process context and must be
+                * performed at socket disposal time, in atomic
+                * context.
+                * Just tell lockdep to consider the listener socket
+                * released here.
+                */
+               mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+               mptcp_cancel_work(sk);
+               mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+               sock_put(sk);
+       }
+
+       /* we are still under the listener msk socket lock */
+       lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+}
+
 static int subflow_ulp_init(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
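
mptcp_subflow_queue_clean() above is the classic two-phase shape: while holding the accept queue's rskq_lock it only collects the unaccepted msk sockets onto a private list, then it drops that spinlock (and temporarily the listener subflow lock) before doing the sleepable per-socket teardown, with an explicit lockdep release/acquire around mptcp_cancel_work() to silence the false-positive ABBA report. A stripped-down sketch of the collect-then-process half; struct item, pending_lock and heavy_cleanup() are hypothetical stand-ins, not MPTCP symbols:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

static void heavy_cleanup(struct item *it)
{
	/* sleepable work, must run outside the spinlock */
}

static void drain_pending(void)
{
	struct item *it, *next;
	LIST_HEAD(local);

	/* Phase 1: grab everything under the spinlock, nothing heavy here. */
	spin_lock_bh(&pending_lock);
	list_splice_init(&pending, &local);
	spin_unlock_bh(&pending_lock);

	/* Phase 2: sleepable teardown with the lock already dropped. */
	list_for_each_entry_safe(it, next, &local, node) {
		list_del(&it->node);
		heavy_cleanup(it);
	}
}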
index 6004d4b..e48ab8d 100644 (file)
@@ -3447,6 +3447,64 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
        return 0;
 }
 
+int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+                        const struct nft_set_iter *iter,
+                        struct nft_set_elem *elem)
+{
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
+       const struct nft_data *data;
+       int err;
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+           *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+               return 0;
+
+       data = nft_set_ext_data(ext);
+       switch (data->verdict.code) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               pctx->level++;
+               err = nft_chain_validate(ctx, data->verdict.chain);
+               if (err < 0)
+                       return err;
+               pctx->level--;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+struct nft_set_elem_catchall {
+       struct list_head        list;
+       struct rcu_head         rcu;
+       void                    *elem;
+};
+
+int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       u8 genmask = nft_genmask_next(ctx->net);
+       struct nft_set_elem_catchall *catchall;
+       struct nft_set_elem elem;
+       struct nft_set_ext *ext;
+       int ret = 0;
+
+       list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+               ext = nft_set_elem_ext(set, catchall->elem);
+               if (!nft_set_elem_active(ext, genmask))
+                       continue;
+
+               elem.priv = catchall->elem;
+               ret = nft_setelem_validate(ctx, set, NULL, &elem);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
+}
+
 static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
                                             const struct nft_chain *chain,
                                             const struct nlattr *nla);
@@ -4759,12 +4817,6 @@ err_set_name:
        return err;
 }
 
-struct nft_set_elem_catchall {
-       struct list_head        list;
-       struct rcu_head         rcu;
-       void                    *elem;
-};
-
 static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
                                     struct nft_set *set)
 {
@@ -6056,7 +6108,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (err < 0)
                return err;
 
-       if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
+       if (((flags & NFT_SET_ELEM_CATCHALL) && nla[NFTA_SET_ELEM_KEY]) ||
+           (!(flags & NFT_SET_ELEM_CATCHALL) && !nla[NFTA_SET_ELEM_KEY]))
                return -EINVAL;
 
        if (flags != 0) {
@@ -7052,7 +7105,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
        }
 
        if (nla[NFTA_OBJ_USERDATA]) {
-               obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL);
+               obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL_ACCOUNT);
                if (obj->udata == NULL)
                        goto err_userdata;
 
index cae5a67..cecf8ab 100644 (file)
@@ -199,37 +199,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
-                                      struct nft_set *set,
-                                      const struct nft_set_iter *iter,
-                                      struct nft_set_elem *elem)
-{
-       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
-       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
-       const struct nft_data *data;
-       int err;
-
-       if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
-           *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
-               return 0;
-
-       data = nft_set_ext_data(ext);
-       switch (data->verdict.code) {
-       case NFT_JUMP:
-       case NFT_GOTO:
-               pctx->level++;
-               err = nft_chain_validate(ctx, data->verdict.chain);
-               if (err < 0)
-                       return err;
-               pctx->level--;
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
 static int nft_lookup_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **d)
@@ -245,9 +214,12 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
        iter.skip       = 0;
        iter.count      = 0;
        iter.err        = 0;
-       iter.fn         = nft_lookup_validate_setelem;
+       iter.fn         = nft_setelem_validate;
 
        priv->set->ops->walk(ctx, priv->set, &iter);
+       if (!iter.err)
+               iter.err = nft_set_catchall_validate(ctx, priv->set);
+
        if (iter.err < 0)
                return iter.err;
 
index 2a6b6be..35785a3 100644 (file)
@@ -3235,6 +3235,9 @@ int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
 
 err_miss_alloc:
        tcf_exts_destroy(exts);
+#ifdef CONFIG_NET_CLS_ACT
+       exts->actions = NULL;
+#endif
        return err;
 }
 EXPORT_SYMBOL(tcf_exts_init_ex);
index cf5ebe4..02098a0 100644 (file)
@@ -421,15 +421,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        } else
                weight = 1;
 
-       if (tb[TCA_QFQ_LMAX]) {
+       if (tb[TCA_QFQ_LMAX])
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-               if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
-                       pr_notice("qfq: invalid max length %u\n", lmax);
-                       return -EINVAL;
-               }
-       } else
+       else
                lmax = psched_mtu(qdisc_dev(sch));
 
+       if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+               pr_notice("qfq: invalid max length %u\n", lmax);
+               return -EINVAL;
+       }
+
        inv_w = ONE_FP / weight;
        weight = ONE_FP / inv_w;
 
index ce0541e..95ca783 100644 (file)
@@ -73,7 +73,6 @@ static void checksum_case(struct kunit *test)
 {
        const struct gss_krb5_test_param *param = test->param_value;
        struct xdr_buf buf = {
-               .head[0].iov_base       = param->plaintext->data,
                .head[0].iov_len        = param->plaintext->len,
                .len                    = param->plaintext->len,
        };
@@ -99,6 +98,10 @@ static void checksum_case(struct kunit *test)
        err = crypto_ahash_setkey(tfm, Kc.data, Kc.len);
        KUNIT_ASSERT_EQ(test, err, 0);
 
+       buf.head[0].iov_base = kunit_kzalloc(test, buf.head[0].iov_len, GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf.head[0].iov_base);
+       memcpy(buf.head[0].iov_base, param->plaintext->data, buf.head[0].iov_len);
+
        checksum.len = gk5e->cksumlength;
        checksum.data = kunit_kzalloc(test, checksum.len, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, checksum.data);
@@ -1327,6 +1330,7 @@ static void rfc6803_encrypt_case(struct kunit *test)
        if (!gk5e)
                kunit_skip(test, "Encryption type is not available");
 
+       memset(usage_data, 0, sizeof(usage_data));
        usage.data[3] = param->constant;
 
        Ke.len = gk5e->Ke_length;
index f88d108..aef85e9 100644 (file)
@@ -262,6 +262,20 @@ BINDGEN_TARGET             := $(BINDGEN_TARGET_$(SRCARCH))
 # some configurations, with new GCC versions, etc.
 bindgen_extra_c_flags = -w --target=$(BINDGEN_TARGET)
 
+# Auto variable zero-initialization requires an additional special option with
+# clang that is going to be removed sometime in the future (likely in
+# clang-18), so make sure to pass this option only if clang supports it
+# (libclang major version < 16).
+#
+# https://github.com/llvm/llvm-project/issues/44842
+# https://github.com/llvm/llvm-project/blob/llvmorg-16.0.0-rc2/clang/docs/ReleaseNotes.rst#deprecated-compiler-flags
+ifdef CONFIG_INIT_STACK_ALL_ZERO
+libclang_maj_ver=$(shell $(BINDGEN) $(srctree)/scripts/rust_is_available_bindgen_libclang.h 2>&1 | sed -ne 's/.*clang version \([0-9]*\).*/\1/p')
+ifeq ($(shell expr $(libclang_maj_ver) \< 16), 1)
+bindgen_extra_c_flags += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
+endif
+endif
+
 bindgen_c_flags = $(filter-out $(bindgen_skip_c_flags), $(c_flags)) \
        $(bindgen_extra_c_flags)
 endif
@@ -283,7 +297,7 @@ quiet_cmd_bindgen = BINDGEN $@
                $(bindgen_target_cflags) $(bindgen_target_extra)
 
 $(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
-    $(shell grep -v '^\#\|^$$' $(srctree)/$(src)/bindgen_parameters)
+    $(shell grep -v '^#\|^$$' $(srctree)/$(src)/bindgen_parameters)
 $(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
     $(src)/bindgen_parameters FORCE
        $(call if_changed_dep,bindgen)
index 3010332..8009184 100644 (file)
@@ -18,7 +18,11 @@ use crate::bindings;
 
 // Called from `vsprintf` with format specifier `%pA`.
 #[no_mangle]
-unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
+unsafe extern "C" fn rust_fmt_argument(
+    buf: *mut c_char,
+    end: *mut c_char,
+    ptr: *const c_void,
+) -> *mut c_char {
     use fmt::Write;
     // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
     let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
index b771310..cd3d2a6 100644 (file)
@@ -408,7 +408,7 @@ impl RawFormatter {
     /// If `pos` is less than `end`, then the region between `pos` (inclusive) and `end`
     /// (exclusive) must be valid for writes for the lifetime of the returned [`RawFormatter`].
     pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
-        // INVARIANT: The safety requierments guarantee the type invariants.
+        // INVARIANT: The safety requirements guarantee the type invariants.
         Self {
             beg: pos as _,
             pos: pos as _,
index 4d90691..4000ad0 100644 (file)
@@ -49,7 +49,7 @@ git-config-tar.zst = -c tar.tar.zst.command="$(ZSTD)"
 
 quiet_cmd_archive = ARCHIVE $@
       cmd_archive = git -C $(srctree) $(git-config-tar$(suffix $@)) archive \
-                    --output=$$(realpath $@) --prefix=$(basename $@)/ $(archive-args)
+                    --output=$$(realpath $@) $(archive-args)
 
 # Linux source tarball
 # ---------------------------------------------------------------------------
@@ -57,7 +57,7 @@ quiet_cmd_archive = ARCHIVE $@
 linux-tarballs := $(addprefix linux, .tar.gz)
 
 targets += $(linux-tarballs)
-$(linux-tarballs): archive-args = $$(cat $<)
+$(linux-tarballs): archive-args = --prefix=linux/ $$(cat $<)
 $(linux-tarballs): .tmp_HEAD FORCE
        $(call if_changed,archive)
 
@@ -189,7 +189,7 @@ perf-archive-args = --add-file=$$(realpath $(word 2, $^)) \
 perf-tarballs := $(addprefix perf-$(KERNELVERSION), .tar .tar.gz .tar.bz2 .tar.xz .tar.zst)
 
 targets += $(perf-tarballs)
-$(perf-tarballs): archive-args = $(perf-archive-args)
+$(perf-tarballs): archive-args = --prefix=perf-$(KERNELVERSION)/ $(perf-archive-args)
 $(perf-tarballs): tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE
        $(call if_changed,archive)
 
index 7b6756a..4c3f645 100644 (file)
@@ -625,7 +625,7 @@ int main(int argc, char **argv)
        p = strrchr(argv[1], '/');
        p = p ? p + 1 : argv[1];
        grammar_name = strdup(p);
-       if (!p) {
+       if (!grammar_name) {
                perror(NULL);
                exit(1);
        }
index 0573c92..a7e28b6 100755 (executable)
@@ -45,10 +45,6 @@ Clang)
        version=$2.$3.$4
        min_version=$($min_tool_version llvm)
        ;;
-ICC)
-       version=$(($2 / 100)).$(($2 % 100)).$3
-       min_version=$($min_tool_version icc)
-       ;;
 *)
        echo "$orig_args: unknown C compiler" >&2
        exit 1
index ecc7ea9..946e250 100755 (executable)
@@ -104,7 +104,10 @@ def generate_crates(srctree, objtree, sysroot_src):
             name = path.name.replace(".rs", "")
 
             # Skip those that are not crate roots.
-            if f"{name}.o" not in open(path.parent / "Makefile").read():
+            try:
+                if f"{name}.o" not in open(path.parent / "Makefile").read():
+                    continue
+            except FileNotFoundError:
                 continue
 
             logging.info("Adding %s", name)
index 28b3831..464761a 100755 (executable)
@@ -13,4 +13,4 @@ set -e
 #
 # In the future, checking for the `.comment` section may be another
 # option, see https://github.com/rust-lang/rust/pull/97550.
-${NM} "$*" | grep -qE '^[0-9a-fA-F]+ r _R[^[:space:]]+16___IS_RUST_MODULE[^[:space:]]*$'
+${NM} "$*" | grep -qE '^[0-9a-fA-F]+ [Rr] _R[^[:space:]]+16___IS_RUST_MODULE[^[:space:]]*$'
index a4c2c22..74b83c9 100755 (executable)
@@ -190,7 +190,7 @@ EOF
 
 # Generate copyright file
 cat <<EOF > debian/copyright
-This is a packacked upstream version of the Linux kernel.
+This is a packaged upstream version of the Linux kernel.
 
 The sources may be found at most Linux archive sites, including:
 https://www.kernel.org/pub/linux/kernel
index 331380c..5868661 100644 (file)
@@ -3521,6 +3521,7 @@ static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
        unsigned long i;
        void __user **bufs;
        snd_pcm_uframes_t frames;
+       const struct iovec *iov = iter_iov(to);
 
        pcm_file = iocb->ki_filp->private_data;
        substream = pcm_file->substream;
@@ -3530,18 +3531,20 @@ static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
        if (runtime->state == SNDRV_PCM_STATE_OPEN ||
            runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
                return -EBADFD;
-       if (!iter_is_iovec(to))
+       if (!to->user_backed)
                return -EINVAL;
        if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
                return -EINVAL;
-       if (!frame_aligned(runtime, to->iov->iov_len))
+       if (!frame_aligned(runtime, iov->iov_len))
                return -EINVAL;
-       frames = bytes_to_samples(runtime, to->iov->iov_len);
+       frames = bytes_to_samples(runtime, iov->iov_len);
        bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
        if (bufs == NULL)
                return -ENOMEM;
-       for (i = 0; i < to->nr_segs; ++i)
-               bufs[i] = to->iov[i].iov_base;
+       for (i = 0; i < to->nr_segs; ++i) {
+               bufs[i] = iov->iov_base;
+               iov++;
+       }
        result = snd_pcm_lib_readv(substream, bufs, frames);
        if (result > 0)
                result = frames_to_bytes(runtime, result);
@@ -3558,6 +3561,7 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
        unsigned long i;
        void __user **bufs;
        snd_pcm_uframes_t frames;
+       const struct iovec *iov = iter_iov(from);
 
        pcm_file = iocb->ki_filp->private_data;
        substream = pcm_file->substream;
@@ -3567,17 +3571,19 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
        if (runtime->state == SNDRV_PCM_STATE_OPEN ||
            runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
                return -EBADFD;
-       if (!iter_is_iovec(from))
+       if (!from->user_backed)
                return -EINVAL;
        if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
-           !frame_aligned(runtime, from->iov->iov_len))
+           !frame_aligned(runtime, iov->iov_len))
                return -EINVAL;
-       frames = bytes_to_samples(runtime, from->iov->iov_len);
+       frames = bytes_to_samples(runtime, iov->iov_len);
        bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
        if (bufs == NULL)
                return -ENOMEM;
-       for (i = 0; i < from->nr_segs; ++i)
-               bufs[i] = from->iov[i].iov_base;
+       for (i = 0; i < from->nr_segs; ++i) {
+               bufs[i] = iov->iov_base;
+               iov++;
+       }
        result = snd_pcm_lib_writev(substream, bufs, frames);
        if (result > 0)
                result = frames_to_bytes(runtime, result);
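
Both the readv and writev hunks stop poking at to->iov directly and instead take a pointer from iter_iov() and step it per segment, which is the access pattern this series settles on for user-backed iterators. A condensed sketch of just that pattern, with the PCM-specific checks dropped; collect_bases() is a hypothetical helper, only the iov_iter fields and the iter_iov() accessor are real:

#include <linux/uio.h>
#include <linux/errno.h>

static int collect_bases(struct iov_iter *to, void __user **bufs)
{
	const struct iovec *iov = iter_iov(to);
	unsigned long i;

	if (!to->user_backed)
		return -EINVAL;

	for (i = 0; i < to->nr_segs; i++) {
		bufs[i] = iov->iov_base;
		iov++;
	}
	return 0;
}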
index 3b9f077..f70d6a3 100644 (file)
@@ -9288,7 +9288,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x0ac9, "Dell Precision 3260", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
@@ -9469,6 +9468,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
index f90a6a7..fde055c 100644 (file)
@@ -31,7 +31,7 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
                        MAX98373_GLOBAL_EN_MASK, 1);
                usleep_range(30000, 31000);
                break;
-       case SND_SOC_DAPM_POST_PMD:
+       case SND_SOC_DAPM_PRE_PMD:
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 0);
@@ -64,7 +64,7 @@ static const struct snd_kcontrol_new max98373_spkfb_control =
 static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
 SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
        MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
-       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
        &max98373_dai_controls),
 SND_SOC_DAPM_OUTPUT("BE_OUT"),
index 3b81a46..05a7d15 100644 (file)
@@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
                be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
                tmp_chan = be_chan;
        }
-       if (!tmp_chan)
-               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+       if (!tmp_chan) {
+               tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
+               if (IS_ERR(tmp_chan)) {
+                       dev_err(dev, "failed to request DMA channel for Back-End\n");
+                       return -EINVAL;
+               }
+       }
 
        /*
         * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
         * peripheral, unlike SDMA channel that is allocated dynamically. So no
         * need to configure dma_request and dma_request2, but get dma_chan of
-        * Back-End device directly via dma_request_slave_channel.
+        * Back-End device directly via dma_request_chan.
         */
        if (!asrc->use_edma) {
                /* Get DMA request of Back-End */
index 1b19747..990bba0 100644 (file)
@@ -1546,7 +1546,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
        .use_imx_pcm = true,
        .use_edma = true,
        .fifo_depth = 64,
-       .pins = 1,
+       .pins = 4,
        .reg_offset = 0,
        .mclk0_is_mclk1 = false,
        .flags = 0,
index 669b99a..3a5394c 100644 (file)
@@ -1806,10 +1806,12 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *
        int ret;
 
        if (!src_fw_module || !sink_fw_module) {
-               /* The NULL module will print as "(efault)" */
-               dev_err(sdev->dev, "source %s or sink %s widget weren't set up properly\n",
-                       src_fw_module->man4_module_entry.name,
-                       sink_fw_module->man4_module_entry.name);
+               dev_err(sdev->dev,
+                       "cannot bind %s -> %s, no firmware module for: %s%s\n",
+                       src_widget->widget->name, sink_widget->widget->name,
+                       src_fw_module ? "" : " source",
+                       sink_fw_module ? "" : " sink");
+
                return -ENODEV;
        }
 
index 8d33830..85412ae 100644 (file)
@@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
        pm_message_t pm_state;
        u32 target_state = snd_sof_dsp_power_target(sdev);
+       u32 old_state = sdev->dsp_power_state.state;
        int ret;
 
        /* do nothing if dsp suspend callback is not set */
@@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
                return 0;
 
-       if (tplg_ops && tplg_ops->tear_down_all_pipelines)
+       /* we need to tear down pipelines only if the DSP hardware is
+        * active, which happens for PCI devices. if the device is
+        * suspended, it is brought back to full power and then
+        * suspended again
+        */
+       if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
                tplg_ops->tear_down_all_pipelines(sdev, false);
 
        if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
index e497875..37e9f68 100644 (file)
@@ -39,7 +39,7 @@ help:
        @echo '  turbostat              - Intel CPU idle stats and freq reporting tool'
        @echo '  usb                    - USB testing tools'
        @echo '  virtio                 - vhost test module'
-       @echo '  vm                     - misc vm tools'
+       @echo '  mm                     - misc mm tools'
        @echo '  wmi                    - WMI interface examples'
        @echo '  x86_energy_perf_policy - Intel energy policy tool'
        @echo ''
@@ -69,7 +69,7 @@ acpi: FORCE
 cpupower: FORCE
        $(call descend,power/$@)
 
-cgroup counter firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
+cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
        $(call descend,$@)
 
 bpf/%: FORCE
@@ -118,7 +118,7 @@ kvm_stat: FORCE
 
 all: acpi cgroup counter cpupower gpio hv firewire \
                perf selftests bootconfig spi turbostat usb \
-               virtio vm bpf x86_energy_perf_policy \
+               virtio mm bpf x86_energy_perf_policy \
                tmon freefall iio objtool kvm_stat wmi \
                pci debugging tracing thermal thermometer thermal-engine
 
@@ -128,7 +128,7 @@ acpi_install:
 cpupower_install:
        $(call descend,power/$(@:_install=),install)
 
-cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
+cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
        $(call descend,$(@:_install=),install)
 
 selftests_install:
@@ -158,7 +158,7 @@ kvm_stat_install:
 install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
                hv_install firewire_install iio_install \
                perf_install selftests_install turbostat_install usb_install \
-               virtio_install vm_install bpf_install x86_energy_perf_policy_install \
+               virtio_install mm_install bpf_install x86_energy_perf_policy_install \
                tmon_install freefall_install objtool_install kvm_stat_install \
                wmi_install pci_install debugging_install intel-speed-select_install \
                tracing_install thermometer_install thermal-engine_install
@@ -169,7 +169,7 @@ acpi_clean:
 cpupower_clean:
        $(call descend,power/cpupower,clean)
 
-cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
+cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
        $(call descend,$(@:_clean=),clean)
 
 libapi_clean:
@@ -211,7 +211,7 @@ build_clean:
 
 clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
                perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
-               vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
+               mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
                freefall_clean build_clean libbpf_clean libsubcmd_clean \
                gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
                intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean
index d4e32b3..00b4ba1 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_LOONGARCH_BITSPERLONG_H
 #define __ASM_LOONGARCH_BITSPERLONG_H
 
-#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
 
 #include <asm-generic/bitsperlong.h>
 
index 7c2ac12..9979889 100644 (file)
@@ -857,7 +857,7 @@ int main(int argc, char **argv)
                        if (cull & CULL_PID || filter & FILTER_PID)
                                fprintf(fout, ", PID %d", list[i].pid);
                        if (cull & CULL_TGID || filter & FILTER_TGID)
-                               fprintf(fout, ", TGID %d", list[i].pid);
+                               fprintf(fout, ", TGID %d", list[i].tgid);
                        if (cull & CULL_COMM || filter & FILTER_COMM)
                                fprintf(fout, ", task_comm_name: %s", list[i].comm);
                        if (cull & CULL_ALLOCATOR) {