Merge tag 'ovl-fixes-6.2-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Jan 2023 21:39:30 +0000 (13:39 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Jan 2023 21:39:30 +0000 (13:39 -0800)
Pull overlayfs fixes from Miklos Szeredi:
 "Fix two bugs, a recent one introduced in the last cycle, and an older
  one from v5.11"

* tag 'ovl-fixes-6.2-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs:
  ovl: fail on invalid uid/gid mapping at copy up
  ovl: fix tmpfile leak

117 files changed:
CREDITS
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/networking/bridge.rst
Documentation/networking/nf_conntrack-sysctl.rst
MAINTAINERS
arch/arm/Makefile
arch/arm/crypto/Makefile
arch/arm/mm/nommu.c
arch/arm/mm/proc-macros.S
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/efi.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/arm64/kvm/vgic/vgic.h
arch/riscv/include/asm/alternative-macros.h
arch/riscv/kernel/head.S
arch/riscv/kernel/probes/simulate-insn.c
arch/riscv/kernel/smpboot.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/kvm/vmx/vmx.c
drivers/acpi/video_detect.c
drivers/edac/edac_device.c
drivers/edac/qcom_edac.c
drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/display/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fbdev_generic.c
drivers/gpu/drm/drm_vma_manager.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h [changed mode: 0755->0644]
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/ethernet/adi/adin1110.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/renesas/rswitch.h
drivers/net/mdio/mdio-mux-meson-g12a.c
drivers/perf/arm-cmn.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/dell/dell-wmi-base.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/hp/hp-wmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/libiscsi.c
drivers/target/target_core_tmr.c
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
drivers/ufs/core/ufshcd.c
drivers/vfio/vfio_iommu_type1.c
fs/ext4/xattr.c
fs/fuse/acl.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/xattr.c
fs/gfs2/log.c
fs/nfsd/filecache.c
include/drm/drm_fb_helper.h
include/drm/drm_vma_manager.h
include/linux/apple-gmux.h
include/net/mana/gdma.h
include/scsi/libiscsi.h
include/uapi/linux/netfilter/nf_conntrack_sctp.h
include/uapi/linux/netfilter/nfnetlink_cttimeout.h
include/ufs/ufshcd.h
kernel/module/main.c
kernel/sched/core.c
kernel/sched/fair.c
lib/nlattr.c
net/core/net_namespace.c
net/ipv4/fib_semantics.c
net/ipv4/metrics.c
net/ipv6/ip6_output.c
net/mctp/af_mctp.c
net/mctp/route.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/netrom/nr_timer.c
net/sched/sch_taprio.c
net/sctp/bind_addr.c
net/x25/af_x25.c
rust/kernel/print.rs
sound/soc/codecs/es8326.c [changed mode: 0755->0644]
sound/soc/codecs/es8326.h [changed mode: 0755->0644]
tools/testing/selftests/amd-pstate/Makefile
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
virt/kvm/vfio.c

diff --git a/CREDITS b/CREDITS
index 4e302a4..acac06b 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2489,6 +2489,13 @@ D: XF86_Mach8
 D: XF86_8514
 D: cfdisk (curses based disk partitioning program)
 
+N: Mat Martineau
+E: mat@martineau.name
+D: MPTCP subsystem co-maintainer 2020-2023
+D: Keyctl restricted keyring and Diffie-Hellman UAPI
+D: Bluetooth L2CAP ERTM mode and AMP
+S: USA
+
 N: John S. Marvin
 E: jsm@fc.hp.com
 D: PA-RISC port
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index c672076..a2884e3 100644 (file)
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -83,7 +83,7 @@ properties:
       insensitive, letters in the riscv,isa string must be all
       lowercase to simplify parsing.
     $ref: "/schemas/types.yaml#/definitions/string"
-    pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
+    pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
 
   # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
   timebase-frequency: false
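
As a worked illustration (example string, not taken from the patch), "rv64imafdc_zicsr_zifencei" is accepted by the new pattern: the single-letter extensions imafdc consume the optional letters in order, and each "_z..." group matches the trailing (?:_[hsxz](?:[a-z])+)* part. Uppercase strings remain rejected, consistent with the lowercase rule quoted above.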
diff --git a/Documentation/networking/bridge.rst b/Documentation/networking/bridge.rst
index 4aef9cd..c859f3c 100644 (file)
--- a/Documentation/networking/bridge.rst
+++ b/Documentation/networking/bridge.rst
@@ -8,7 +8,7 @@ In order to use the Ethernet bridging functionality, you'll need the
 userspace tools.
 
 Documentation for Linux bridging is on:
-   http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+   https://wiki.linuxfoundation.org/networking/bridge
 
 The bridge-utilities are maintained at:
    git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 49db1d1..8b1045c 100644 (file)
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -173,7 +173,9 @@ nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds)
        default 3
 
 nf_conntrack_sctp_timeout_established - INTEGER (seconds)
-       default 432000 (5 days)
+       default 210
+
+       Default is set to (hb_interval * path_max_retrans + rto_max)
 
 nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
        default 0.3
@@ -190,12 +192,6 @@ nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds)
        This timeout is used to setup conntrack entry on secondary paths.
        Default is set to hb_interval.
 
-nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
-       default 210
-
-       This timeout is used to setup conntrack entry on secondary paths.
-       Default is set to (hb_interval * path_max_retrans + rto_max)
-
 nf_conntrack_udp_timeout - INTEGER (seconds)
        default 30
 
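
For reference, 210 is what the quoted formula yields under the kernel's stock SCTP defaults (assumed here for illustration): hb_interval = 30 s, path_max_retrans = 5 and rto_max = 60 s give 30 * 5 + 60 = 210 seconds.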
diff --git a/MAINTAINERS b/MAINTAINERS
index f781f93..c224f3d 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7615,7 +7615,6 @@ S:        Maintained
 F:     drivers/firmware/efi/test/
 
 EFI VARIABLE FILESYSTEM
-M:     Matthew Garrett <matthew.garrett@nebula.com>
 M:     Jeremy Kerr <jk@ozlabs.org>
 M:     Ard Biesheuvel <ardb@kernel.org>
 L:     linux-efi@vger.kernel.org
@@ -8467,16 +8466,16 @@ F:      fs/fscache/
 F:     include/linux/fscache*.h
 
 FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
+M:     Eric Biggers <ebiggers@kernel.org>
 M:     Theodore Y. Ts'o <tytso@mit.edu>
 M:     Jaegeuk Kim <jaegeuk@kernel.org>
-M:     Eric Biggers <ebiggers@kernel.org>
 L:     linux-fscrypt@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-fscrypt/list/
-T:     git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
+T:     git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git
 F:     Documentation/filesystems/fscrypt.rst
 F:     fs/crypto/
-F:     include/linux/fscrypt*.h
+F:     include/linux/fscrypt.h
 F:     include/uapi/linux/fscrypt.h
 
 FSI SUBSYSTEM
@@ -8519,10 +8518,10 @@ F:      include/linux/fsnotify*.h
 FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
 M:     Eric Biggers <ebiggers@kernel.org>
 M:     Theodore Y. Ts'o <tytso@mit.edu>
-L:     linux-fscrypt@vger.kernel.org
+L:     fsverity@lists.linux.dev
 S:     Supported
-Q:     https://patchwork.kernel.org/project/linux-fscrypt/list/
-T:     git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
+Q:     https://patchwork.kernel.org/project/fsverity/list/
+T:     git https://git.kernel.org/pub/scm/fs/fsverity/linux.git
 F:     Documentation/filesystems/fsverity.rst
 F:     fs/verity/
 F:     include/linux/fsverity.h
@@ -14633,7 +14632,6 @@ F:      net/netfilter/xt_SECMARK.c
 F:     net/netlabel/
 
 NETWORKING [MPTCP]
-M:     Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:     Matthieu Baerts <matthieu.baerts@tessares.net>
 L:     netdev@vger.kernel.org
 L:     mptcp@lists.linux.dev
@@ -17962,6 +17960,7 @@ M:      Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-riscv/list/
+C:     irc://irc.libera.chat/riscv
 P:     Documentation/riscv/patch-acceptance.rst
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
 F:     arch/riscv/
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4067f51..955b036 100644 (file)
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -132,7 +132,7 @@ AFLAGS_NOWARN       :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
 CFLAGS_ISA     :=-Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
-AFLAGS_ISA     :=$(CFLAGS_ISA) -Wa$(comma)-mthumb -D__thumb2__=2
+AFLAGS_ISA     :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
 CFLAGS_ISA     +=-mthumb
 else
 CFLAGS_ISA     :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 971e745..13e62c7 100644 (file)
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -53,7 +53,12 @@ $(obj)/%-core.S: $(src)/%-armv4.pl
 
 clean-files += poly1305-core.S sha256-core.S sha512-core.S
 
+aflags-thumb2-$(CONFIG_THUMB2_KERNEL)  := -U__thumb2__ -D__thumb2__=1
+
+AFLAGS_sha256-core.o += $(aflags-thumb2-y)
+AFLAGS_sha512-core.o += $(aflags-thumb2-y)
+
 # massage the perlasm code a bit so we only get the NEON routine if we need it
 poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
 poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
-AFLAGS_poly1305-core.o += $(poly1305-aflags-y)
+AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index c1494a4..53f2d87 100644 (file)
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
        mpu_setup();
 
        /* allocate the zero page. */
-       zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+       zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index fa6999e..e43f6d7 100644 (file)
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -6,6 +6,7 @@
  *  VM_EXEC
  */
 #include <asm/asm-offsets.h>
+#include <asm/pgtable.h>
 #include <asm/thread_info.h>
 
 #ifdef CONFIG_CPU_V7M
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 31d13a6..de4ff90 100644 (file)
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -48,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 })
 
 extern spinlock_t efi_rt_lock;
+extern u64 *efi_rt_stack_top;
 efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
+/*
+ * efi_rt_stack_top[-1] contains the value the stack pointer had before
+ * switching to the EFI runtime stack.
+ */
+#define current_in_efi()                                               \
+       (!preemptible() && efi_rt_stack_top != NULL &&                  \
+        on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
+
 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
 /*
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 4e5354b..66ec8ca 100644 (file)
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
 #define stackinfo_get_sdei_critical()  stackinfo_get_unknown()
 #endif
 
+#ifdef CONFIG_EFI
+extern u64 *efi_rt_stack_top;
+
+static inline struct stack_info stackinfo_get_efi(void)
+{
+       unsigned long high = (u64)efi_rt_stack_top;
+       unsigned long low = high - THREAD_SIZE;
+
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+       };
+}
+#endif
+
 #endif /* __ASM_STACKTRACE_H */
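
stackinfo_get_efi() above assumes the dedicated EFI runtime stack is THREAD_SIZE bytes ending at efi_rt_stack_top, so the unwinder can treat it as one more known stack range alongside the task, IRQ and SDEI stacks.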
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index d872d18..e8ae803 100644 (file)
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -46,7 +46,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
        mov     x4, x6
        blr     x8
 
+       mov     x16, sp
        mov     sp, x29
+       str     xzr, [x16, #8]                  // clear recorded task SP value
+
        ldp     x1, x2, [sp, #16]
        cmp     x2, x18
        ldp     x29, x30, [sp], #112
@@ -71,6 +74,9 @@ SYM_FUNC_END(__efi_rt_asm_wrapper)
 SYM_CODE_START(__efi_rt_asm_recover)
        mov     sp, x30
 
+       ldr_l   x16, efi_rt_stack_top           // clear recorded task SP value
+       str     xzr, [x16, #-8]
+
        ldp     x19, x20, [sp, #32]
        ldp     x21, x22, [sp, #48]
        ldp     x23, x24, [sp, #64]
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index fab05de..b273900 100644 (file)
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 
 #include <asm/efi.h>
+#include <asm/stacktrace.h>
 
 static bool region_is_misaligned(const efi_memory_desc_t *md)
 {
@@ -154,7 +155,7 @@ asmlinkage efi_status_t __efi_rt_asm_recover(void);
 bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
 {
         /* Check whether the exception occurred while running the firmware */
-       if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
+       if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
                return false;
 
        pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 117e2c1..8315430 100644 (file)
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 #include <linux/kernel.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
 #include <linux/sched.h>
@@ -12,6 +13,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
+#include <asm/efi.h>
 #include <asm/irq.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
                        : stackinfo_get_unknown();              \
        })
 
+#define STACKINFO_EFI                                          \
+       ({                                                      \
+               ((task == current) && current_in_efi())         \
+                       ? stackinfo_get_efi()                   \
+                       : stackinfo_get_unknown();              \
+       })
+
 noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
@@ -200,6 +209,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                STACKINFO_SDEI(normal),
                STACKINFO_SDEI(critical),
 #endif
+#ifdef CONFIG_EFI
+               STACKINFO_EFI,
+#endif
        };
        struct unwind_state state = {
                .stacks = stacks,
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5626ddb..cf4c495 100644 (file)
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1079,7 +1079,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 
                        /* uaccess failed, don't leave stale tags */
                        if (num_tags != MTE_GRANULES_PER_PAGE)
-                               mte_clear_page_tags(page);
+                               mte_clear_page_tags(maddr);
                        set_page_mte_tagged(page);
 
                        kvm_release_pfn_dirty(pfn);
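
The one-line fix passes the mapped kernel address (maddr) instead of the struct page pointer: mte_clear_page_tags() takes a void * into the linear map, so handing it `page` compiled silently while clearing tags through the wrong address.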
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 2074521..2624963 100644 (file)
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -350,26 +350,23 @@ retry:
  * The deactivation of the doorbell interrupt will trigger the
  * unmapping of the associated vPE.
  */
-static void unmap_all_vpes(struct vgic_dist *dist)
+static void unmap_all_vpes(struct kvm *kvm)
 {
-       struct irq_desc *desc;
+       struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
 
-       for (i = 0; i < dist->its_vm.nr_vpes; i++) {
-               desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
-               irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
-       }
+       for (i = 0; i < dist->its_vm.nr_vpes; i++)
+               free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
 }
 
-static void map_all_vpes(struct vgic_dist *dist)
+static void map_all_vpes(struct kvm *kvm)
 {
-       struct irq_desc *desc;
+       struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
 
-       for (i = 0; i < dist->its_vm.nr_vpes; i++) {
-               desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
-               irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
-       }
+       for (i = 0; i < dist->its_vm.nr_vpes; i++)
+               WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
+                                               dist->its_vm.vpes[i]->irq));
 }
 
 /**
@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
         * and enabling of the doorbells have already been done.
         */
        if (kvm_vgic_global_state.has_gicv4_1) {
-               unmap_all_vpes(dist);
+               unmap_all_vpes(kvm);
                vlpi_avail = true;
        }
 
@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 
 out:
        if (vlpi_avail)
-               map_all_vpes(dist);
+               map_all_vpes(kvm);
 
        return ret;
 }
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index ad06ba6..a413718 100644 (file)
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
        *val = !!(*ptr & mask);
 }
 
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+{
+       return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
+}
+
 /**
  * vgic_v4_init - Initialize the GICv4 data structures
  * @kvm:       Pointer to the VM being initialized
@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
                        irq_flags &= ~IRQ_NOAUTOEN;
                irq_set_status_flags(irq, irq_flags);
 
-               ret = request_irq(irq, vgic_v4_doorbell_handler,
-                                 0, "vcpu", vcpu);
+               ret = vgic_v4_request_vpe_irq(vcpu, irq);
                if (ret) {
                        kvm_err("failed to allocate vcpu IRQ%d\n", irq);
                        /*
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0c8da72..23e280f 100644 (file)
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
 void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
 
 #endif
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 7226e24..2c0f4c8 100644 (file)
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -46,7 +46,7 @@
 
 .macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1,   \
                                new_c_2, vendor_id_2, errata_id_2, enable_2
-       ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1
+       ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1
        ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
 .endm
 
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index b865046..4bf6c44 100644 (file)
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -326,7 +326,7 @@ clear_bss_done:
        call soc_early_init
        tail start_kernel
 
-#if CONFIG_RISCV_BOOT_SPINWAIT
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
 .Lsecondary_start:
        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
index d73e96f..a20568b 100644 (file)
--- a/arch/riscv/kernel/probes/simulate-insn.c
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
        u32 rd_index = (opcode >> 7) & 0x1f;
        u32 rs1_index = (opcode >> 15) & 0x1f;
 
-       ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
+       ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
        if (!ret)
                return ret;
 
-       ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
+       ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
        if (!ret)
                return ret;
 
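
The swap matters when rd and rs1 name the same register (for example jalr ra, ra): the previous order wrote the return address into rd before reading the branch base out of rs1, so the simulated target was computed from the clobbered value.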
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 3373df4..ddb2afb 100644 (file)
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running);
 
 void __init smp_prepare_boot_cpu(void)
 {
-       init_cpu_topology();
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        int ret;
        unsigned int curr_cpuid;
 
+       init_cpu_topology();
+
        curr_cpuid = smp_processor_id();
        store_cpu_topology(curr_cpuid);
        numa_store_cpu_info(curr_cpuid);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index dfd2c12..bafdc2b 100644 (file)
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6339,6 +6339,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_SAPPHIRERAPIDS_X:
+       case INTEL_FAM6_EMERALDRAPIDS_X:
                pmem = true;
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3019fb1..551741e 100644 (file)
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &icx_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,     &icx_cstates),
 
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &icl_cstates),
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 1f60a2b..fdbb5f0 100644 (file)
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
+       int cpu;
+
        static_branch_disable(&arch_scale_freq_key);
+
+       /*
+        * Set arch_freq_scale to a default value on all cpus
+        * This negates the effect of scaling
+        */
+       for_each_possible_cpu(cpu)
+               per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
 }
 
 static DECLARE_WORK(disable_freq_invariance_work,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fc9008d..7eec022 100644 (file)
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3440,18 +3440,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
        u32 ar;
 
-       if (var->unusable || !var->present)
-               ar = 1 << 16;
-       else {
-               ar = var->type & 15;
-               ar |= (var->s & 1) << 4;
-               ar |= (var->dpl & 3) << 5;
-               ar |= (var->present & 1) << 7;
-               ar |= (var->avl & 1) << 12;
-               ar |= (var->l & 1) << 13;
-               ar |= (var->db & 1) << 14;
-               ar |= (var->g & 1) << 15;
-       }
+       ar = var->type & 15;
+       ar |= (var->s & 1) << 4;
+       ar |= (var->dpl & 3) << 5;
+       ar |= (var->present & 1) << 7;
+       ar |= (var->avl & 1) << 12;
+       ar |= (var->l & 1) << 13;
+       ar |= (var->db & 1) << 14;
+       ar |= (var->g & 1) << 15;
+       ar |= (var->unusable || !var->present) << 16;
 
        return ar;
 }
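
Bit 16 in the VMX segment access-rights encoding is the "segment unusable" flag; the rewrite folds the unusable/not-present case into that single bit while still encoding the remaining attribute bits, rather than collapsing the whole field to a bare 1 << 16.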
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 65cec7b..a8c0260 100644 (file)
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
 }
 #endif
 
-static bool apple_gmux_backlight_present(void)
-{
-       struct acpi_device *adev;
-       struct device *dev;
-
-       adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
-       if (!adev)
-               return false;
-
-       dev = acpi_get_first_physical_node(adev);
-       if (!dev)
-               return false;
-
-       /*
-        * drivers/platform/x86/apple-gmux.c only supports old style
-        * Apple GMUX with an IO-resource.
-        */
-       return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
-}
-
 /* Force to use vendor driver when the ACPI device is known to be
  * buggy */
 static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -612,6 +592,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        },
        {
         .callback = video_detect_force_native,
+        /* Asus U46E */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
+               },
+       },
+       {
+        .callback = video_detect_force_native,
         /* Asus UX303UB */
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -620,6 +608,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        },
        {
         .callback = video_detect_force_native,
+        /* HP EliteBook 8460p */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
+               },
+       },
+       {
+        .callback = video_detect_force_native,
+        /* HP Pavilion g6-1d80nr / B4U19UA */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+               DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
+               },
+       },
+       {
+        .callback = video_detect_force_native,
         /* Samsung N150P */
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -766,6 +771,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
 {
        static DEFINE_MUTEX(init_mutex);
        static bool nvidia_wmi_ec_present;
+       static bool apple_gmux_present;
        static bool native_available;
        static bool init_done;
        static long video_caps;
@@ -779,6 +785,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
                                    ACPI_UINT32_MAX, find_video, NULL,
                                    &video_caps, NULL);
                nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
+               apple_gmux_present = apple_gmux_detect(NULL, NULL);
                init_done = true;
        }
        if (native)
@@ -800,7 +807,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
        if (nvidia_wmi_ec_present)
                return acpi_backlight_nvidia_wmi_ec;
 
-       if (apple_gmux_backlight_present())
+       if (apple_gmux_present)
                return acpi_backlight_apple_gmux;
 
        /* Use ACPI video if available, except when native should be preferred. */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 878deb4..0689e15 100644 (file)
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -34,6 +34,9 @@
 static DEFINE_MUTEX(device_ctls_mutex);
 static LIST_HEAD(edac_device_list);
 
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
 #ifdef CONFIG_EDAC_DEBUG
 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
 {
@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
         * whole one second to save timers firing all over the period
         * between integral seconds
         */
-       if (edac_dev->poll_msec == 1000)
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
                edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
        else
                edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
         * timers firing on sub-second basis, while they are happy
         * to fire together on the 1 second exactly
         */
-       if (edac_dev->poll_msec == 1000)
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
                edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
        else
                edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
        edac_dev->delay     = msecs_to_jiffies(msec);
 
        /* See comment in edac_device_workq_setup() above */
-       if (edac_dev->poll_msec == 1000)
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
                edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
        else
                edac_mod_work(&edac_dev->work, edac_dev->delay);
@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
                /* This instance is NOW RUNNING */
                edac_dev->op_state = OP_RUNNING_POLL;
 
-               /*
-                * enable workq processing on this instance,
-                * default = 1000 msec
-                */
-               edac_device_workq_setup(edac_dev, 1000);
+               edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
        } else {
                edac_dev->op_state = OP_RUNNING_INTERRUPT;
        }
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 97a27e4..c45519f 100644 (file)
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -252,7 +252,7 @@ clear:
 static int
 dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
 {
-       struct llcc_drv_data *drv = edev_ctl->pvt_info;
+       struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
        int ret;
 
        ret = dump_syn_reg_values(drv, bank, err_type);
@@ -289,7 +289,7 @@ static irqreturn_t
 llcc_ecc_irq_handler(int irq, void *edev_ctl)
 {
        struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
-       struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+       struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
        irqreturn_t irq_rc = IRQ_NONE;
        u32 drp_error, trp_error, i;
        int ret;
@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
        edev_ctl->dev_name = dev_name(dev);
        edev_ctl->ctl_name = "llcc";
        edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
-       edev_ctl->pvt_info = llcc_driv_data;
 
        rc = edac_device_add_device(edev_ctl);
        if (rc)
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 95548c5..077c53c 100644 (file)
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -35,6 +35,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
 
 static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 970b066..5dff79e 100644 (file)
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -40,6 +40,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
 
 static int mes_v11_0_hw_fini(void *handle);
 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -196,7 +198,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
        mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
        mes_add_queue_pkt.tma_addr = input->tma_addr;
        mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
-       mes_add_queue_pkt.trap_en = 1;
 
        /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
        mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4d42033..af37bc6 100644 (file)
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8881,6 +8881,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
 
+               /* Unset freesync video if it was active before */
+               if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+                       dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+                       dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+               }
+
+               /* Now check if we should set freesync video mode */
                if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
@@ -9497,6 +9504,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        bool lock_and_validation_needed = false;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+       struct drm_dp_mst_topology_mgr *mgr;
+       struct drm_dp_mst_topology_state *mst_state;
        struct dsc_mst_fairness_vars vars[MAX_PIPES];
 #endif
 
@@ -9745,6 +9754,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                lock_and_validation_needed = true;
        }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /* set the slot info for each mst_state based on the link encoding format */
+       for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+               struct amdgpu_dm_connector *aconnector;
+               struct drm_connector *connector;
+               struct drm_connector_list_iter iter;
+               u8 link_coding_cap;
+
+               drm_connector_list_iter_begin(dev, &iter);
+               drm_for_each_connector_iter(connector, &iter) {
+                       if (connector->index == mst_state->mgr->conn_base_id) {
+                               aconnector = to_amdgpu_dm_connector(connector);
+                               link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+                               drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+                               break;
+                       }
+               }
+               drm_connector_list_iter_end(&iter);
+       }
+#endif
+
        /**
         * Streams and planes are reset when there are changes that affect
         * bandwidth. Anything that affects bandwidth needs to go through
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6994c9a..5cff56b 100644 (file)
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 }
 
 static void
-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
-                                  struct amdgpu_dm_connector *aconnector,
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+                                  bool enable,
+                                  struct drm_dp_mst_atomic_payload *target_payload,
                                   struct dc_dp_mst_stream_allocation_table *table)
 {
        struct dc_dp_mst_stream_allocation_table new_table = { 0 };
        struct dc_dp_mst_stream_allocation *sa;
-       struct drm_dp_mst_atomic_payload *payload;
+       struct link_mst_stream_allocation_table copy_of_link_table =
+                                                                               link->mst_stream_alloc_table;
+
+       int i;
+       int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+       struct link_mst_stream_allocation *dc_alloc;
+
+       /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
+       if (enable) {
+               dc_alloc =
+               &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+               dc_alloc->vcp_id = target_payload->vcpi;
+               dc_alloc->slot_count = target_payload->time_slots;
+       } else {
+               for (i = 0; i < copy_of_link_table.stream_count; i++) {
+                       dc_alloc =
+                       &copy_of_link_table.stream_allocations[i];
+
+                       if (dc_alloc->vcp_id == target_payload->vcpi) {
+                               dc_alloc->vcp_id = 0;
+                               dc_alloc->slot_count = 0;
+                               break;
+                       }
+               }
+               ASSERT(i != copy_of_link_table.stream_count);
+       }
 
        /* Fill payload info*/
-       list_for_each_entry(payload, &mst_state->payloads, next) {
-               if (payload->delete)
-                       continue;
-
-               sa = &new_table.stream_allocations[new_table.stream_count];
-               sa->slot_count = payload->time_slots;
-               sa->vcp_id = payload->vcpi;
-               new_table.stream_count++;
+       for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+               dc_alloc =
+                       &copy_of_link_table.stream_allocations[i];
+               if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+                       sa = &new_table.stream_allocations[new_table.stream_count];
+                       sa->slot_count = dc_alloc->slot_count;
+                       sa->vcp_id = dc_alloc->vcp_id;
+                       new_table.stream_count++;
+               }
        }
 
        /* Overwrite the old table */
@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
         * AUX message. The sequence is slot 1-63 allocated sequence for each
         * stream. AMD ASIC stream slot allocation should follow the same
         * sequence. copy DRM MST allocation to dc */
-       fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
+       fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
 
        return true;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d7a044e..abdbd43 100644 (file)
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -903,11 +903,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);
 
-       mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
-#endif
-
        /* Set up params */
        for (i = 0; i < dc_state->stream_count; i++) {
                struct dc_dsc_policy dsc_policy = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 342e906..c88f044 100644 (file)
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
        struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
        int i;
        bool mst_mode = (link->type == dc_connection_mst_branch);
+       /* adjust for drm changes*/
+       bool update_drm_mst_state = true;
        const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        const struct dc_link_settings empty_link_settings = {0};
        DC_LOGGER_INIT(link->ctx->logger);
 
+
        /* deallocate_mst_payload is called before disable link. When mode or
         * disable/enable monitor, new stream is created which is not in link
         * stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                                &empty_link_settings,
                                avg_time_slots_per_mtp);
 
-       if (mst_mode) {
+       if (mst_mode || update_drm_mst_state) {
                /* when link is in mst mode, reply on mst manager to remove
                 * payload
                 */
@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                        stream->ctx,
                        stream);
 
+               if (!update_drm_mst_state)
+                       dm_helpers_dp_mst_send_payload_allocation(
+                               stream->ctx,
+                               stream,
+                               false);
+       }
+
+       if (update_drm_mst_state)
                dm_helpers_dp_mst_send_payload_allocation(
                        stream->ctx,
                        stream,
                        false);
-       }
 
        return DC_OK;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 4c20d17..cf96c3f 100644 (file)
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
                            PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
        MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
+       MSG_MAP(AllowIHHostInterrupt,           PPSMC_MSG_AllowIHHostInterrupt,       0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 51a4668..4ca3726 100644 (file)
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
 
        mgr->payload_count--;
        mgr->next_start_slot -= payload->time_slots;
+
+       if (payload->delete)
+               drm_dp_mst_put_port_malloc(payload->port);
 }
 EXPORT_SYMBOL(drm_dp_remove_payload);
 
@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
 
        drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
        if (!payload->delete) {
-               drm_dp_mst_put_port_malloc(port);
                payload->pbn = 0;
                payload->delete = true;
                topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index ab86956..593aa32 100644 (file)
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -171,11 +171,6 @@ static const struct fb_ops drm_fbdev_fb_ops = {
        .fb_imageblit   = drm_fbdev_fb_imageblit,
 };
 
-static struct fb_deferred_io drm_fbdev_defio = {
-       .delay          = HZ / 20,
-       .deferred_io    = drm_fb_helper_deferred_io,
-};
-
 /*
  * This function uses the client API to create a framebuffer backed by a dumb buffer.
  */
@@ -222,8 +217,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
                        return -ENOMEM;
                fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
 
-               fbi->fbdefio = &drm_fbdev_defio;
-               fb_deferred_io_init(fbi);
+               /* Set a default deferred I/O handler */
+               fb_helper->fbdefio.delay = HZ / 20;
+               fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+               fbi->fbdefio = &fb_helper->fbdefio;
+               ret = fb_deferred_io_init(fbi);
+               if (ret)
+                       return ret;
        } else {
                /* buffer is mapped for HW framebuffer */
                ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 7de37f8..83229a0 100644 (file)
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
 }
 EXPORT_SYMBOL(drm_vma_offset_remove);
 
-/**
- * drm_vma_node_allow - Add open-file to list of allowed users
- * @node: Node to modify
- * @tag: Tag of file to remove
- *
- * Add @tag to the list of allowed open-files for this node. If @tag is
- * already on this list, the ref-count is incremented.
- *
- * The list of allowed-users is preserved across drm_vma_offset_add() and
- * drm_vma_offset_remove() calls. You may even call it if the node is currently
- * not added to any offset-manager.
- *
- * You must remove all open-files the same number of times as you added them
- * before destroying the node. Otherwise, you will leak memory.
- *
- * This is locked against concurrent access internally.
- *
- * RETURNS:
- * 0 on success, negative error code on internal failure (out-of-mem)
- */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+static int vma_node_allow(struct drm_vma_offset_node *node,
+                         struct drm_file *tag, bool ref_counted)
 {
        struct rb_node **iter;
        struct rb_node *parent = NULL;
@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
                entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
                if (tag == entry->vm_tag) {
-                       entry->vm_count++;
+                       if (ref_counted)
+                               entry->vm_count++;
                        goto unlock;
                } else if (tag > entry->vm_tag) {
                        iter = &(*iter)->rb_right;
@@ -307,9 +289,59 @@ unlock:
        kfree(new);
        return ret;
 }
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node. If @tag is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+       return vma_node_allow(node, tag, true);
+}
 EXPORT_SYMBOL(drm_vma_node_allow);
 
 /**
+ * drm_vma_node_allow_once - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
+ * should only be called once after this.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+       return vma_node_allow(node, tag, false);
+}
+EXPORT_SYMBOL(drm_vma_node_allow_once);
+
+/**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify
  * @tag: Tag of file to remove
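
The kerneldoc above pins down the contract; a minimal sketch of the intended call pattern, assuming a driver that may rediscover the same node for the same file any number of times (names are illustrative):

        int err = drm_vma_node_allow_once(node, tag);
        if (err)
                return err;
        /* ... later lookups need no further allow calls ... */
        drm_vma_node_revoke(node, tag); /* exactly once, e.g. on file close */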
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0ad44f3..c7c252d 100644 (file)
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -697,7 +697,7 @@ insert:
        GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
        if (file)
-               drm_vma_node_allow(&mmo->vma_node, file);
+               drm_vma_node_allow_once(&mmo->vma_node, file);
        return mmo;
 
 err:
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7771a19..bbeeb6d 100644 (file)
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -288,39 +288,6 @@ static const u8 dg2_xcs_offsets[] = {
        END
 };
 
-static const u8 mtl_xcs_offsets[] = {
-       NOP(1),
-       LRI(13, POSTED),
-       REG16(0x244),
-       REG(0x034),
-       REG(0x030),
-       REG(0x038),
-       REG(0x03c),
-       REG(0x168),
-       REG(0x140),
-       REG(0x110),
-       REG(0x1c0),
-       REG(0x1c4),
-       REG(0x1c8),
-       REG(0x180),
-       REG16(0x2b4),
-       NOP(4),
-
-       NOP(1),
-       LRI(9, POSTED),
-       REG16(0x3a8),
-       REG16(0x28c),
-       REG16(0x288),
-       REG16(0x284),
-       REG16(0x280),
-       REG16(0x27c),
-       REG16(0x278),
-       REG16(0x274),
-       REG16(0x270),
-
-       END
-};
-
 static const u8 gen8_rcs_offsets[] = {
        NOP(1),
        LRI(14, POSTED),
@@ -739,9 +706,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
                else
                        return gen8_rcs_offsets;
        } else {
-               if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
-                       return mtl_xcs_offsets;
-               else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+               if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
                        return dg2_xcs_offsets;
                else if (GRAPHICS_VER(engine->i915) >= 12)
                        return gen12_xcs_offsets;
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 310fb83..2990dd4 100644 (file)
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
 
 int intel_selftest_modify_policy(struct intel_engine_cs *engine,
                                 struct intel_selftest_saved_policy *saved,
-                                u32 modify_type)
-
+                                enum selftest_scheduler_modify modify_type)
 {
        int err;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index c1a633c..e315f66 100644 (file)
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,7 +104,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
        },
        {
                .compatible = "microchip,ksz8563",
-               .data = &ksz_switch_chips[KSZ9893]
+               .data = &ksz_switch_chips[KSZ8563]
        },
        {
                .compatible = "microchip,ksz9567",
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 0805f24..c26b859 100644 (file)
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
 
        if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
            (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
-               rxb->offload_fwd_mark = 1;
+               rxb->offload_fwd_mark = port_priv->priv->forwarding;
 
        netif_rx(rxb);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 59debdc..5874729 100644 (file)
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11166,7 +11166,7 @@ static void tg3_reset_task(struct work_struct *work)
        rtnl_lock();
        tg3_full_lock(tp, 0);
 
-       if (!netif_running(tp->dev)) {
+       if (tp->pcierr_recovery || !netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
                rtnl_unlock();
@@ -18101,6 +18101,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        netdev_info(netdev, "PCI I/O error detected\n");
 
+       /* Want to make sure that the reset task doesn't run */
+       tg3_reset_task_cancel(tp);
+
        rtnl_lock();
 
        /* Could be second call or maybe we don't have netdev yet */
@@ -18117,9 +18120,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        tg3_timer_stop(tp);
 
-       /* Want to make sure that the reset task doesn't run */
-       tg3_reset_task_cancel(tp);
-
        netif_device_detach(netdev);
 
        /* Clean up software state, even if MMIO is blocked */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index bf0190e..00e2108 100644 (file)
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -450,7 +450,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
                /* ring full, shall not happen because queue is stopped if full
                 * below
                 */
-               netif_stop_queue(tx->adapter->netdev);
+               netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
 
                spin_unlock_irqrestore(&tx->lock, flags);
 
@@ -493,7 +493,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
        if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
                /* ring can get full with next frame */
-               netif_stop_queue(tx->adapter->netdev);
+               netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
        }
 
        spin_unlock_irqrestore(&tx->lock, flags);
@@ -503,11 +503,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
+       struct tsnep_tx_entry *entry;
+       struct netdev_queue *nq;
        unsigned long flags;
        int budget = 128;
-       struct tsnep_tx_entry *entry;
-       int count;
        int length;
+       int count;
+
+       nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
 
        spin_lock_irqsave(&tx->lock, flags);
 
@@ -564,8 +567,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
        } while (likely(budget));
 
        if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
-           netif_queue_stopped(tx->adapter->netdev)) {
-               netif_wake_queue(tx->adapter->netdev);
+           netif_tx_queue_stopped(nq)) {
+               netif_tx_wake_queue(nq);
        }
 
        spin_unlock_irqrestore(&tx->lock, flags);
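
netif_stop_queue() and netif_wake_queue() act only on TX queue 0, so flow control on a multi-queue device has to address its own struct netdev_queue, which is what the hunks above switch to. A short sketch of the pattern (dev and qid are hypothetical):

        struct netdev_queue *nq = netdev_get_tx_queue(dev, qid);

        netif_tx_stop_queue(nq);        /* same effect as netif_stop_subqueue(dev, qid) */
        if (netif_tx_queue_stopped(nq))
                netif_tx_wake_queue(nq);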
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 644f3c9..2341597 100644 (file)
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3191,7 +3191,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        for (q = 0; q < fep->num_rx_queues; q++) {
                rxq = fep->rx_queue[q];
                for (i = 0; i < rxq->bd.ring_size; i++)
-                       page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+                       page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
 
                for (i = 0; i < XDP_STATS_TOTAL; i++)
                        rxq->stats[i] = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 0d1bab4..2a9f1ee 100644 (file)
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
 
 /* board specific private data structure */
 struct iavf_adapter {
+       struct workqueue_struct *wq;
        struct work_struct reset_task;
        struct work_struct adminq_task;
        struct delayed_work client_task;
@@ -459,7 +460,6 @@ struct iavf_device {
 
 /* needed by iavf_ethtool.c */
 extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
 
 static inline const char *iavf_state_str(enum iavf_state_t state)
 {
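
The iavf.h hunk above anchors the rest of the series: the driver drops the global iavf_wq in favour of a per-adapter workqueue, so every queue_work(), queue_delayed_work() and mod_delayed_work() call site below switches mechanically to adapter->wq.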
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index d79ead5..6f171d1 100644 (file)
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
        if (changed_flags & IAVF_FLAG_LEGACY_RX) {
                if (netif_running(netdev)) {
                        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-                       queue_work(iavf_wq, &adapter->reset_task);
+                       queue_work(adapter->wq, &adapter->reset_task);
                }
        }
 
@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 
        return 0;
@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
        spin_unlock_bh(&adapter->fdir_fltr_lock);
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
 ret:
        if (err && fltr)
@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        spin_unlock_bh(&adapter->fdir_fltr_lock);
 
        if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
        return err;
 }
@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
        spin_unlock_bh(&adapter->adv_rss_lock);
 
        if (!err)
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
        mutex_unlock(&adapter->crit_lock);
 
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index adc02ad..4b09785 100644 (file)
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
 MODULE_LICENSE("GPL v2");
 
 static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
 
 int iavf_status_to_errno(enum iavf_status status)
 {
@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 }
 
@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
 {
        adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
 
        if (adapter->state != __IAVF_REMOVE)
                /* schedule work on the private workqueue */
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
 
        return IRQ_HANDLED;
 }
@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
 
        /* schedule the watchdog task to immediately process the request */
        if (f) {
-               queue_work(iavf_wq, &adapter->watchdog_task.work);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
                return 0;
        }
        return -ENOMEM;
@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
 
        if (aq_required) {
                adapter->aq_required |= aq_required;
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
        }
 }
 
@@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work)
                goto restart_watchdog;
        }
 
+       if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+           adapter->netdev_registered &&
+           !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+           rtnl_trylock()) {
+               netdev_update_features(adapter->netdev);
+               rtnl_unlock();
+               adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+       }
+
        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                iavf_change_state(adapter, __IAVF_COMM_FAILED);
 
@@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                mutex_unlock(&adapter->crit_lock);
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
                return;
        }
 
@@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work)
        case __IAVF_STARTUP:
                iavf_startup(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_VERSION_CHECK:
                iavf_init_version_check(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_GET_RESOURCES:
                iavf_init_get_resources(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_EXTENDED_CAPS:
                iavf_init_process_extended_caps(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_CONFIG_ADAPTER:
                iavf_init_config_adapter(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_FAILED:
@@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
                        iavf_shutdown_adminq(hw);
                        mutex_unlock(&adapter->crit_lock);
-                       queue_delayed_work(iavf_wq,
+                       queue_delayed_work(adapter->wq,
                                           &adapter->watchdog_task, (5 * HZ));
                        return;
                }
                /* Try again from failed step */
                iavf_change_state(adapter, adapter->last_state);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
                return;
        case __IAVF_COMM_FAILED:
                if (test_bit(__IAVF_IN_REMOVE_TASK,
@@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq,
+               queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
                return;
        case __IAVF_RESETTING:
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+                                  HZ * 2);
                return;
        case __IAVF_DOWN:
        case __IAVF_DOWN_PENDING:
@@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq,
+               queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task, HZ * 2);
                return;
        }
@@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work)
        mutex_unlock(&adapter->crit_lock);
 restart_watchdog:
        if (adapter->state >= __IAVF_DOWN)
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
        if (adapter->aq_required)
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(20));
        else
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+                                  HZ * 2);
 }
 
 /**
@@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work)
         */
        if (!mutex_trylock(&adapter->crit_lock)) {
                if (adapter->state != __IAVF_REMOVE)
-                       queue_work(iavf_wq, &adapter->reset_task);
+                       queue_work(adapter->wq, &adapter->reset_task);
 
                goto reset_finish;
        }
@@ -3116,7 +3126,7 @@ continue_reset:
        bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
        bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
 
        /* We were running when the reset started, so we need to restore some
         * state here.
@@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work)
                if (adapter->state == __IAVF_REMOVE)
                        return;
 
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
                goto out;
        }
 
@@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work)
        } while (pending);
        mutex_unlock(&adapter->crit_lock);
 
-       if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
-               if (adapter->netdev_registered ||
-                   !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
-                       struct net_device *netdev = adapter->netdev;
-
-                       rtnl_lock();
-                       netdev_update_features(netdev);
-                       rtnl_unlock();
-                       /* Request VLAN offload settings */
-                       if (VLAN_V2_ALLOWED(adapter))
-                               iavf_set_vlan_offload_features
-                                       (adapter, 0, netdev->features);
-
-                       iavf_set_queue_vlan_tag_loc(adapter);
-               }
-
-               adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
-       }
        if ((adapter->flags &
             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
            adapter->state == __IAVF_RESETTING)
@@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 
        return 0;
@@ -4898,6 +4890,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw = &adapter->hw;
        hw->back = adapter;
 
+       adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+                                             iavf_driver_name);
+       if (!adapter->wq) {
+               err = -ENOMEM;
+               goto err_alloc_wq;
+       }
+
        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        iavf_change_state(adapter, __IAVF_STARTUP);
 
@@ -4942,7 +4941,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
-       queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+       queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
        /* Setup the wait queue for indicating transition to down status */
@@ -4954,6 +4953,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_ioremap:
+       destroy_workqueue(adapter->wq);
+err_alloc_wq:
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_disable_pcie_error_reporting(pdev);
@@ -5023,7 +5024,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
                return err;
        }
 
-       queue_work(iavf_wq, &adapter->reset_task);
+       queue_work(adapter->wq, &adapter->reset_task);
 
        netif_device_attach(adapter->netdev);
 
@@ -5170,6 +5171,8 @@ static void iavf_remove(struct pci_dev *pdev)
        }
        spin_unlock_bh(&adapter->adv_rss_lock);
 
+       destroy_workqueue(adapter->wq);
+
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
@@ -5196,24 +5199,11 @@ static struct pci_driver iavf_driver = {
  **/
 static int __init iavf_init_module(void)
 {
-       int ret;
-
        pr_info("iavf: %s\n", iavf_driver_string);
 
        pr_info("%s\n", iavf_copyright);
 
-       iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
-                                 iavf_driver_name);
-       if (!iavf_wq) {
-               pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
-               return -ENOMEM;
-       }
-
-       ret = pci_register_driver(&iavf_driver);
-       if (ret)
-               destroy_workqueue(iavf_wq);
-
-       return ret;
+       return pci_register_driver(&iavf_driver);
 }
 
 module_init(iavf_init_module);
@@ -5227,7 +5217,6 @@ module_init(iavf_init_module);
 static void __exit iavf_exit_module(void)
 {
        pci_unregister_driver(&iavf_driver);
-       destroy_workqueue(iavf_wq);
 }
 
 module_exit(iavf_exit_module);
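
[Editor's note] The iavf_main.c hunks above all serve one change: the driver-global
iavf_wq is replaced by a per-adapter ordered workqueue allocated in probe and torn
down in remove and on the probe error path, so queue lifetime follows the device
rather than the module. A minimal sketch of that ownership pattern (hypothetical
my_adapter/my_probe_wq names; alloc_ordered_workqueue() and destroy_workqueue()
are the real kernel APIs):

    #include <linux/workqueue.h>

    struct my_adapter {
            struct workqueue_struct *wq;
            struct work_struct reset_task;
    };

    static int my_probe_wq(struct my_adapter *adapter, const char *name)
    {
            /* Ordered: at most one work item runs at a time, keeping the
             * serialization the old single-threaded global queue provided. */
            adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
            if (!adapter->wq)
                    return -ENOMEM;
            return 0;
    }

    static void my_remove_wq(struct my_adapter *adapter)
    {
            /* Waits for queued work to finish before freeing the queue. */
            destroy_workqueue(adapter->wq);
    }

All queue_work()/mod_delayed_work() call sites then pass adapter->wq, as the hunks above show.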
index 24a701f..365ca0c 100644 (file)
@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
                                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                                dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-                               queue_work(iavf_wq, &adapter->reset_task);
+                               queue_work(adapter->wq, &adapter->reset_task);
                        }
                        break;
                default:
@@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 
                iavf_process_config(adapter);
                adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+               /* Request VLAN offload settings */
+               if (VLAN_V2_ALLOWED(adapter))
+                       iavf_set_vlan_offload_features(adapter, 0,
+                                                      netdev->features);
+
+               iavf_set_queue_vlan_tag_loc(adapter);
+
                was_mac_changed = !ether_addr_equal(netdev->dev_addr,
                                                    adapter->hw.mac.addr);
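
[Editor's note] Related to the iavf watchdog hunk above: netdev_update_features()
moved out of the adminq task and now runs under rtnl_trylock() in the periodic
watchdog, so the worker never blocks on RTNL against paths that hold it while
flushing the workqueue; if the lock is busy, the flag stays set and the next
watchdog run retries. A sketch of the idiom (hypothetical my_dev and
MY_FEATURES_PENDING names):

    static void my_watchdog(struct my_dev *d)
    {
            if ((d->flags & MY_FEATURES_PENDING) && rtnl_trylock()) {
                    netdev_update_features(d->netdev);
                    rtnl_unlock();
                    /* Clear only on success; otherwise the next run retries. */
                    d->flags &= ~MY_FEATURES_PENDING;
            }
    }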
 
index 94aa834..a596e07 100644 (file)
@@ -3235,9 +3235,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
                }
        }
 
-       if (vsi->type == ICE_VSI_PF)
-               ice_devlink_destroy_pf_port(pf);
-
        if (vsi->type == ICE_VSI_VF &&
            vsi->agg_node && vsi->agg_node->valid)
                vsi->agg_node->num_vsis--;
index a9a7f8b..237ede2 100644 (file)
@@ -4590,7 +4590,7 @@ static void ice_print_wake_reason(struct ice_pf *pf)
 }
 
 /**
- * ice_register_netdev - register netdev and devlink port
+ * ice_register_netdev - register netdev
  * @pf: pointer to the PF struct
  */
 static int ice_register_netdev(struct ice_pf *pf)
@@ -4602,11 +4602,6 @@ static int ice_register_netdev(struct ice_pf *pf)
        if (!vsi || !vsi->netdev)
                return -EIO;
 
-       err = ice_devlink_create_pf_port(pf);
-       if (err)
-               goto err_devlink_create;
-
-       SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
        err = register_netdev(vsi->netdev);
        if (err)
                goto err_register_netdev;
@@ -4617,8 +4612,6 @@ static int ice_register_netdev(struct ice_pf *pf)
 
        return 0;
 err_register_netdev:
-       ice_devlink_destroy_pf_port(pf);
-err_devlink_create:
        free_netdev(vsi->netdev);
        vsi->netdev = NULL;
        clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4636,6 +4629,7 @@ static int
 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 {
        struct device *dev = &pdev->dev;
+       struct ice_vsi *vsi;
        struct ice_pf *pf;
        struct ice_hw *hw;
        int i, err;
@@ -4918,6 +4912,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        pcie_print_link_status(pf->pdev);
 
 probe_done:
+       err = ice_devlink_create_pf_port(pf);
+       if (err)
+               goto err_create_pf_port;
+
+       vsi = ice_get_main_vsi(pf);
+       if (!vsi || !vsi->netdev) {
+               err = -EINVAL;
+               goto err_netdev_reg;
+       }
+
+       SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
        err = ice_register_netdev(pf);
        if (err)
                goto err_netdev_reg;
@@ -4955,6 +4961,8 @@ err_init_aux_unroll:
 err_devlink_reg_param:
        ice_devlink_unregister_params(pf);
 err_netdev_reg:
+       ice_devlink_destroy_pf_port(pf);
+err_create_pf_port:
 err_send_version_unroll:
        ice_vsi_release_all(pf);
 err_alloc_sw_unroll:
@@ -5083,6 +5091,7 @@ static void ice_remove(struct pci_dev *pdev)
        ice_setup_mc_magic_wake(pf);
        ice_vsi_release_all(pf);
        mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+       ice_devlink_destroy_pf_port(pf);
        ice_set_wake(pf);
        ice_free_irq_msix_misc(pf);
        ice_for_each_vsi(pf, i) {
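
[Editor's note] The ice hunks above move devlink PF-port creation out of
ice_register_netdev() and into ice_probe(), with the error labels keeping the
usual rule that teardown runs in reverse order of setup. A generic sketch of
that goto-unwind shape (my_create_port() and friends are hypothetical helpers):

    static int my_probe(struct my_pf *pf)
    {
            int err;

            err = my_create_port(pf);       /* e.g. the devlink port */
            if (err)
                    return err;

            err = my_register_netdev(pf);   /* exposes the device */
            if (err)
                    goto err_netdev;
            return 0;

    err_netdev:
            my_destroy_port(pf);            /* reverse of creation */
            return err;
    }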
index e708c2d..b144f22 100644 (file)
@@ -1259,13 +1259,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
                gic->handler = NULL;
                gic->arg = NULL;
 
+               if (!i)
+                       snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+                                pci_name(pdev));
+               else
+                       snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+                                i - 1, pci_name(pdev));
+
                irq = pci_irq_vector(pdev, i);
                if (irq < 0) {
                        err = irq;
                        goto free_mask;
                }
 
-               err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+               err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
                if (err)
                        goto free_mask;
                irq_set_affinity_and_hint(irq, req_mask);
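
[Editor's note] One subtlety in the mana change above: request_irq() stores the
name pointer it is given rather than copying the string, which is why the
formatted, per-queue name lives in the long-lived gic->name buffer and not in a
stack temporary. Sketch of the pattern (hypothetical my_irq_ctx/my_setup_irq):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    struct my_irq_ctx {
            char name[32];  /* must outlive the request_irq() registration */
    };

    static int my_setup_irq(struct pci_dev *pdev, struct my_irq_ctx *ctx,
                            int irq, int queue, irq_handler_t handler)
    {
            snprintf(ctx->name, sizeof(ctx->name), "my_q%d@pci:%s",
                     queue, pci_name(pdev));
            return request_irq(irq, handler, 0, ctx->name, ctx);
    }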
index b4e0fc7..0f54849 100644 (file)
@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
        ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
-               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
                           RIS2);
 
                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
                        priv->stats[RAVB_BE].rx_over_errors++;
 
-                   /* Receive Descriptor Empty int */
+               /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF1)
                        priv->stats[RAVB_NC].rx_over_errors++;
 
@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
        else
                ret = ravb_close(ndev);
 
+       if (priv->info->ccc_gac)
+               ravb_ptp_stop(ndev);
+
        return ret;
 }
 
@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
        /* Restore descriptor base address table */
        ravb_write(ndev, priv->desc_bat_dma, DBAT);
 
+       if (priv->info->ccc_gac)
+               ravb_ptp_init(ndev, priv->pdev);
+
        if (netif_running(ndev)) {
                if (priv->wol_enabled) {
                        ret = ravb_wol_restore(ndev);
index 6441892..2370c77 100644 (file)
@@ -1074,8 +1074,11 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
                        port = NULL;
                        goto out;
                }
-               if (index == rdev->etha->index)
+               if (index == rdev->etha->index) {
+                       if (!of_device_is_available(port))
+                               port = NULL;
                        break;
+               }
        }
 
 out:
@@ -1106,7 +1109,7 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
 
        port = rswitch_get_port_node(rdev);
        if (!port)
-               return -ENODEV;
+               return 0;       /* ignored */
 
        err = of_get_phy_mode(port, &rdev->etha->phy_interface);
        of_node_put(port);
@@ -1324,13 +1327,13 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
 {
        int i, err;
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_ether_port_init_one(priv->rdev[i]);
                if (err)
                        goto err_init_one;
        }
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_serdes_init(priv->rdev[i]);
                if (err)
                        goto err_serdes;
@@ -1339,12 +1342,12 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
        return 0;
 
 err_serdes:
-       for (i--; i >= 0; i--)
+       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_serdes_deinit(priv->rdev[i]);
        i = RSWITCH_NUM_PORTS;
 
 err_init_one:
-       for (i--; i >= 0; i--)
+       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_ether_port_deinit_one(priv->rdev[i]);
 
        return err;
@@ -1608,6 +1611,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
        netif_napi_add(ndev, &rdev->napi, rswitch_poll);
 
        port = rswitch_get_port_node(rdev);
+       rdev->disabled = !port;
        err = of_get_ethdev_address(port, ndev);
        of_node_put(port);
        if (err) {
@@ -1707,16 +1711,16 @@ static int rswitch_init(struct rswitch_private *priv)
        if (err)
                goto err_ether_port_init_all;
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = register_netdev(priv->rdev[i]->ndev);
                if (err) {
-                       for (i--; i >= 0; i--)
+                       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                                unregister_netdev(priv->rdev[i]->ndev);
                        goto err_register_netdev;
                }
        }
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+       rswitch_for_each_enabled_port(priv, i)
                netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
                            priv->rdev[i]->ndev->dev_addr);
 
index edbdd1b..49efb0f 100644 (file)
 #define RSWITCH_MAX_NUM_QUEUES 128
 
 #define RSWITCH_NUM_PORTS      3
+#define rswitch_for_each_enabled_port(priv, i)         \
+       for (i = 0; i < RSWITCH_NUM_PORTS; i++)         \
+               if (priv->rdev[i]->disabled)            \
+                       continue;                       \
+               else
+
+#define rswitch_for_each_enabled_port_continue_reverse(priv, i)        \
+       for (i--; i >= 0; i--)                                  \
+               if (priv->rdev[i]->disabled)                    \
+                       continue;                               \
+               else
 
 #define TX_RING_SIZE           1024
 #define RX_RING_SIZE           1024
@@ -938,6 +949,7 @@ struct rswitch_device {
        struct rswitch_gwca_queue *tx_queue;
        struct rswitch_gwca_queue *rx_queue;
        u8 ts_tag;
+       bool disabled;
 
        int port;
        struct rswitch_etha *etha;
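
[Editor's note] The rswitch_for_each_enabled_port*() macros above use the classic
"if (...) continue; else" construction: the trailing else makes the caller's loop
body bind to the macro's inner if, so the macro remains a single well-formed
statement even when used without braces or followed by an else. Condensed sketch
of the same idiom (hypothetical names):

    #define for_each_enabled(arr, n, i)             \
            for ((i) = 0; (i) < (n); (i)++)         \
                    if ((arr)[i].disabled)          \
                            continue;               \
                    else

    /* Usage: the body attaches to the macro's dangling "else" branch. */
    for_each_enabled(ports, nports, i)
            init_port(&ports[i]);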
index 4a2e94f..c4542ec 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/device.h>
@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
 
 static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
 {
+       u32 value;
        int ret;
 
        /* Enable the phy clock */
@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
 
        /* Initialize ephy control */
        writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
-       writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
-              FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
-              FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
-              PHY_CNTL1_CLK_EN |
-              PHY_CNTL1_CLKFREQ |
-              PHY_CNTL1_PHY_ENB,
-              priv->regs + ETH_PHY_CNTL1);
+
+       /* Make sure we get a 0 -> 1 transition on the enable bit */
+       value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+               FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+               FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+               PHY_CNTL1_CLK_EN |
+               PHY_CNTL1_CLKFREQ;
+       writel(value, priv->regs + ETH_PHY_CNTL1);
        writel(PHY_CNTL2_USE_INTERNAL |
               PHY_CNTL2_SMI_SRC_MAC |
               PHY_CNTL2_RX_CLK_EPHY,
               priv->regs + ETH_PHY_CNTL2);
 
+       value |= PHY_CNTL1_PHY_ENB;
+       writel(value, priv->regs + ETH_PHY_CNTL1);
+
+       /* The phy needs a bit of time to power up */
+       mdelay(10);
+
        return 0;
 }
 
index b80a9b7..1deb61b 100644 (file)
@@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event)
                        hw->dn++;
                        continue;
                }
-               hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
                hw->num_dns++;
                if (bynodeid)
                        break;
@@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event)
                        nodeid, nid.x, nid.y, nid.port, nid.dev, type);
                return -EINVAL;
        }
+       /*
+        * Keep assuming non-cycles events count in all DTC domains; turns out
+        * it's hard to make a worthwhile optimisation around this, short of
+        * going all-in with domain-local counter allocation as well.
+        */
+       hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
 
        return arm_cmn_validate_group(cmn, event);
 }
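
[Editor's note] For reference on the arm-cmn hunk above: (1U << cmn->num_dtcs) - 1
builds an all-ones mask covering num_dtcs bits, i.e. "count this event in every
DTC domain". Assuming num_dtcs is between 1 and 31, the kernel's GENMASK() spells
the same value:

    /* Both produce e.g. 0x7 for three DTC domains. */
    u32 all_dtcs  = (1U << num_dtcs) - 1;
    u32 all_dtcs2 = GENMASK(num_dtcs - 1, 0);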
index 8d92498..3cbb01e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
+#include <linux/serio.h>
 #include <linux/suspend.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
@@ -160,6 +161,10 @@ static bool enable_stb;
 module_param(enable_stb, bool, 0644);
 MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
 
+static bool disable_workarounds;
+module_param(disable_workarounds, bool, 0644);
+MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
+
 static struct amd_pmc_dev pmc;
 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
@@ -653,6 +658,33 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
        return -EINVAL;
 }
 
+static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
+{
+       struct device *d;
+       int rc;
+
+       if (!pdev->major) {
+               rc = amd_pmc_get_smu_version(pdev);
+               if (rc)
+                       return rc;
+       }
+
+       if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+               return 0;
+
+       d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+       if (!d)
+               return 0;
+       if (device_may_wakeup(d)) {
+               dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
+               disable_irq_wake(1);
+               device_set_wakeup_enable(d, false);
+       }
+       put_device(d);
+
+       return 0;
+}
+
 static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
 {
        struct rtc_device *rtc_device;
@@ -715,8 +747,8 @@ static void amd_pmc_s2idle_prepare(void)
        /* Reset and Start SMU logging - to monitor the s0i3 stats */
        amd_pmc_setup_smu_logging(pdev);
 
-       /* Activate CZN specific RTC functionality */
-       if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+       /* Activate CZN specific platform bug workarounds */
+       if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
                rc = amd_pmc_verify_czn_rtc(pdev, &arg);
                if (rc) {
                        dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
@@ -782,6 +814,25 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
        .check = amd_pmc_s2idle_check,
        .restore = amd_pmc_s2idle_restore,
 };
+
+static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
+{
+       struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+       if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
+               int rc = amd_pmc_czn_wa_irq1(pdev);
+
+               if (rc) {
+                       dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+
 #endif
 
 static const struct pci_device_id pmc_pci_ids[] = {
@@ -980,6 +1031,9 @@ static struct platform_driver amd_pmc_driver = {
                .name = "amd_pmc",
                .acpi_match_table = amd_pmc_acpi_ids,
                .dev_groups = pmc_groups,
+#ifdef CONFIG_SUSPEND
+               .pm = &amd_pmc_pm,
+#endif
        },
        .probe = amd_pmc_probe,
        .remove = amd_pmc_remove,
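
[Editor's note] The amd_pmc hunks above show the standard wiring for a
suspend-only PM hook: define the handler __maybe_unused, wrap it with
SIMPLE_DEV_PM_OPS(), and reference it from the driver only under CONFIG_SUSPEND
so non-suspend builds don't warn. Stripped-down sketch (hypothetical driver
names; the macros are the real kernel APIs):

    static int __maybe_unused my_suspend(struct device *dev)
    {
            /* apply pre-suspend quirks here */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, NULL);

    static struct platform_driver my_driver = {
            .driver = {
                    .name = "my_drv",
    #ifdef CONFIG_SUSPEND
                    .pm = &my_pm_ops,
    #endif
            },
    };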
index ca33df7..9333f82 100644 (file)
@@ -64,29 +64,6 @@ struct apple_gmux_data {
 
 static struct apple_gmux_data *apple_gmux_data;
 
-/*
- * gmux port offsets. Many of these are not yet used, but may be in the
- * future, and it's useful to have them documented here anyhow.
- */
-#define GMUX_PORT_VERSION_MAJOR                0x04
-#define GMUX_PORT_VERSION_MINOR                0x05
-#define GMUX_PORT_VERSION_RELEASE      0x06
-#define GMUX_PORT_SWITCH_DISPLAY       0x10
-#define GMUX_PORT_SWITCH_GET_DISPLAY   0x11
-#define GMUX_PORT_INTERRUPT_ENABLE     0x14
-#define GMUX_PORT_INTERRUPT_STATUS     0x16
-#define GMUX_PORT_SWITCH_DDC           0x28
-#define GMUX_PORT_SWITCH_EXTERNAL      0x40
-#define GMUX_PORT_SWITCH_GET_EXTERNAL  0x41
-#define GMUX_PORT_DISCRETE_POWER       0x50
-#define GMUX_PORT_MAX_BRIGHTNESS       0x70
-#define GMUX_PORT_BRIGHTNESS           0x74
-#define GMUX_PORT_VALUE                        0xc2
-#define GMUX_PORT_READ                 0xd0
-#define GMUX_PORT_WRITE                        0xd4
-
-#define GMUX_MIN_IO_LEN                        (GMUX_PORT_BRIGHTNESS + 4)
-
 #define GMUX_INTERRUPT_ENABLE          0xff
 #define GMUX_INTERRUPT_DISABLE         0x00
 
@@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
                gmux_pio_write32(gmux_data, port, val);
 }
 
-static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
-{
-       u16 val;
-
-       outb(0xaa, gmux_data->iostart + 0xcc);
-       outb(0x55, gmux_data->iostart + 0xcd);
-       outb(0x00, gmux_data->iostart + 0xce);
-
-       val = inb(gmux_data->iostart + 0xcc) |
-               (inb(gmux_data->iostart + 0xcd) << 8);
-
-       if (val == 0x55aa)
-               return true;
-
-       return false;
-}
-
 /**
  * DOC: Backlight control
  *
@@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
        int ret = -ENXIO;
        acpi_status status;
        unsigned long long gpe;
+       bool indexed = false;
+       u32 version;
 
        if (apple_gmux_data)
                return -EBUSY;
 
+       if (!apple_gmux_detect(pnp, &indexed)) {
+               pr_info("gmux device not present\n");
+               return -ENODEV;
+       }
+
        gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
        if (!gmux_data)
                return -ENOMEM;
        pnp_set_drvdata(pnp, gmux_data);
 
        res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
-       if (!res) {
-               pr_err("Failed to find gmux I/O resource\n");
-               goto err_free;
-       }
-
        gmux_data->iostart = res->start;
        gmux_data->iolen = resource_size(res);
 
-       if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
-               pr_err("gmux I/O region too small (%lu < %u)\n",
-                      gmux_data->iolen, GMUX_MIN_IO_LEN);
-               goto err_free;
-       }
-
        if (!request_region(gmux_data->iostart, gmux_data->iolen,
                            "Apple gmux")) {
                pr_err("gmux I/O already in use\n");
                goto err_free;
        }
 
-       /*
-        * Invalid version information may indicate either that the gmux
-        * device isn't present or that it's a new one that uses indexed
-        * io
-        */
-
-       ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
-       ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
-       ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
-       if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
-               if (gmux_is_indexed(gmux_data)) {
-                       u32 version;
-                       mutex_init(&gmux_data->index_lock);
-                       gmux_data->indexed = true;
-                       version = gmux_read32(gmux_data,
-                               GMUX_PORT_VERSION_MAJOR);
-                       ver_major = (version >> 24) & 0xff;
-                       ver_minor = (version >> 16) & 0xff;
-                       ver_release = (version >> 8) & 0xff;
-               } else {
-                       pr_info("gmux device not present\n");
-                       ret = -ENODEV;
-                       goto err_release;
-               }
+       if (indexed) {
+               mutex_init(&gmux_data->index_lock);
+               gmux_data->indexed = true;
+               version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
+               ver_major = (version >> 24) & 0xff;
+               ver_minor = (version >> 16) & 0xff;
+               ver_release = (version >> 8) & 0xff;
+       } else {
+               ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
+               ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
+               ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
        }
        pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
                ver_release, (gmux_data->indexed ? "indexed" : "classic"));
index 104188d..1038dfd 100644 (file)
@@ -225,6 +225,7 @@ struct asus_wmi {
 
        int tablet_switch_event_code;
        u32 tablet_switch_dev_id;
+       bool tablet_switch_inverted;
 
        enum fan_type fan_type;
        enum fan_type gpu_fan_type;
@@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
 }
 
 /* Input **********************************************************************/
+static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
+{
+       input_report_switch(asus->inputdev, SW_TABLET_MODE,
+                           asus->tablet_switch_inverted ? !value : value);
+       input_sync(asus->inputdev);
+}
+
 static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
 {
        struct device *dev = &asus->platform_device->dev;
@@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event
        result = asus_wmi_get_devstate_simple(asus, dev_id);
        if (result >= 0) {
                input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
-               input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+               asus_wmi_tablet_sw_report(asus, result);
                asus->tablet_switch_dev_id = dev_id;
                asus->tablet_switch_event_code = event_code;
        } else if (result == -ENODEV) {
@@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
        case asus_wmi_no_tablet_switch:
                break;
        case asus_wmi_kbd_dock_devid:
+               asus->tablet_switch_inverted = true;
                asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
                break;
        case asus_wmi_lid_flip_devid:
@@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
                return;
 
        result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
-       if (result >= 0) {
-               input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
-               input_sync(asus->inputdev);
-       }
+       if (result >= 0)
+               asus_wmi_tablet_sw_report(asus, result);
 }
 
 /* dGPU ********************************************************************/
index 0a259a2..502783a 100644 (file)
@@ -261,6 +261,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
        { KE_KEY,    0x57, { KEY_BRIGHTNESSDOWN } },
        { KE_KEY,    0x58, { KEY_BRIGHTNESSUP } },
 
+       /* Speaker Mute */
+       { KE_KEY, 0x109, { KEY_MUTE } },
+
        /* Mic mute */
        { KE_KEY, 0x150, { KEY_MICMUTE } },
 
index 5e7e665..322cfae 100644 (file)
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
index 0a99058..2ef201b 100644 (file)
@@ -90,6 +90,7 @@ enum hp_wmi_event_ids {
        HPWMI_PEAKSHIFT_PERIOD          = 0x0F,
        HPWMI_BATTERY_CHARGE_PERIOD     = 0x10,
        HPWMI_SANITIZATION_MODE         = 0x17,
+       HPWMI_OMEN_KEY                  = 0x1D,
        HPWMI_SMART_EXPERIENCE_APP      = 0x21,
 };
 
@@ -216,6 +217,8 @@ static const struct key_entry hp_wmi_keymap[] = {
        { KE_KEY, 0x213b,  { KEY_INFO } },
        { KE_KEY, 0x2169,  { KEY_ROTATE_DISPLAY } },
        { KE_KEY, 0x216a,  { KEY_SETUP } },
+       { KE_KEY, 0x21a5,  { KEY_PROG2 } }, /* HP Omen Key */
+       { KE_KEY, 0x21a7,  { KEY_FN_ESC } },
        { KE_KEY, 0x21a9,  { KEY_TOUCHPAD_OFF } },
        { KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
        { KE_KEY, 0x231b,  { KEY_HELP } },
@@ -548,7 +551,7 @@ static int __init hp_wmi_enable_hotkeys(void)
 
 static int hp_wmi_set_block(void *data, bool blocked)
 {
-       enum hp_wmi_radio r = (enum hp_wmi_radio) data;
+       enum hp_wmi_radio r = (long)data;
        int query = BIT(r + 8) | ((!blocked) << r);
        int ret;
 
@@ -810,6 +813,7 @@ static void hp_wmi_notify(u32 value, void *context)
        case HPWMI_SMART_ADAPTER:
                break;
        case HPWMI_BEZEL_BUTTON:
+       case HPWMI_OMEN_KEY:
                key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
                if (key_code < 0)
                        break;
index a959468..02860c3 100644 (file)
@@ -10496,8 +10496,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                        if (err)
                                goto unlock;
                }
-       }
-       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+       } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
                err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
                if (err)
                        goto unlock;
@@ -10525,14 +10524,16 @@ static void dytc_profile_refresh(void)
                        err = dytc_command(DYTC_CMD_MMC_GET, &output);
                else
                        err = dytc_cql_command(DYTC_CMD_GET, &output);
-       } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+               funcmode = DYTC_FUNCTION_MMC;
+       } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
                err = dytc_command(DYTC_CMD_GET, &output);
-
+               /* Check if we are in PSC mode, or have AMT enabled */
+               funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+       }
        mutex_unlock(&dytc_mutex);
        if (err)
                return;
 
-       funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
        perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
        convert_dytc_to_profile(funcmode, perfmode, &profile);
        if (profile != dytc_current_profile) {
index 49cc18a..29a2865 100644 (file)
@@ -981,6 +981,9 @@ queue_rtpg:
  *
  * Returns true if and only if alua_rtpg_work() will be called asynchronously.
  * That function is responsible for calling @qdata->fn().
+ *
+ * Context: may be called from atomic context (alua_check()) only if the caller
+ *     holds an sdev reference.
  */
 static bool alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
@@ -989,8 +992,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
        int start_queue = 0;
        unsigned long flags;
 
-       might_sleep();
-
        if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
                return false;
 
index 4dbf51e..f6da348 100644 (file)
@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
 {
        struct Scsi_Host *sh;
 
-       sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+       sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
        if (sh == NULL) {
                dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
                return -ENOMEM;
index 1d1cf64..0454d94 100644 (file)
@@ -849,7 +849,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
                                       enum iscsi_host_param param, char *buf)
 {
        struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
-       struct iscsi_session *session = tcp_sw_host->session;
+       struct iscsi_session *session;
        struct iscsi_conn *conn;
        struct iscsi_tcp_conn *tcp_conn;
        struct iscsi_sw_tcp_conn *tcp_sw_conn;
@@ -859,6 +859,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 
        switch (param) {
        case ISCSI_HOST_PARAM_IPADDRESS:
+               session = tcp_sw_host->session;
                if (!session)
                        return -ENOTCONN;
 
@@ -959,11 +960,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
        if (!cls_session)
                goto remove_host;
        session = cls_session->dd_data;
-       tcp_sw_host = iscsi_host_priv(shost);
-       tcp_sw_host->session = session;
 
        if (iscsi_tcp_r2tpool_alloc(session))
                goto remove_session;
+
+       /* We are now fully set up so expose the session to sysfs. */
+       tcp_sw_host = iscsi_host_priv(shost);
+       tcp_sw_host->session = session;
        return cls_session;
 
 remove_session:
@@ -983,10 +986,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
        if (WARN_ON_ONCE(session->leadconn))
                return;
 
+       iscsi_session_remove(cls_session);
+       /*
+        * Our get_host_param needs to access the session, so remove the
+        * host from sysfs before freeing the session to make sure userspace
+        * is no longer accessing the callout.
+        */
+       iscsi_host_remove(shost, false);
+
        iscsi_tcp_r2tpool_free(cls_session->dd_data);
-       iscsi_session_teardown(cls_session);
 
-       iscsi_host_remove(shost, false);
+       iscsi_session_free(cls_session);
        iscsi_host_free(shost);
 }
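
[Editor's note] The net effect of the iscsi_tcp change above is a strict
unpublish-before-free order: drop the session from the iSCSI class, remove the
host from sysfs (so the get_host_param callout can no longer run from
userspace), and only then free the session memory. Reduced to a shape, this is
the standard use-after-free cure for sysfs-visible objects (hypothetical names):

    static void my_destroy(struct my_session *s)
    {
            my_session_remove(s);    /* 1. no new lookups via the class  */
            my_host_remove(s->host); /* 2. sysfs callouts can't touch s  */
            my_pool_free(s);         /* 3. release per-session resources */
            my_session_free(s);      /* 4. now freeing is race-free      */
    }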
 
index ef2fc86..127f3d7 100644 (file)
@@ -3104,17 +3104,32 @@ dec_session_count:
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
 
-/**
- * iscsi_session_teardown - destroy session, host, and cls_session
- * @cls_session: iscsi session
+/*
+ * iscsi_session_remove - Remove session from iSCSI class.
  */
-void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+void iscsi_session_remove(struct iscsi_cls_session *cls_session)
 {
        struct iscsi_session *session = cls_session->dd_data;
-       struct module *owner = cls_session->transport->owner;
        struct Scsi_Host *shost = session->host;
 
        iscsi_remove_session(cls_session);
+       /*
+        * host removal only has to wait for its children to be removed from
+        * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
+        * the session, so drop the session count here.
+        */
+       iscsi_host_dec_session_cnt(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_remove);
+
+/**
+ * iscsi_session_free - Free iscsi session and its resources
+ * @cls_session: iscsi session
+ */
+void iscsi_session_free(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *session = cls_session->dd_data;
+       struct module *owner = cls_session->transport->owner;
 
        iscsi_pool_free(&session->cmdpool);
        kfree(session->password);
@@ -3132,10 +3147,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
        kfree(session->discovery_parent_type);
 
        iscsi_free_session(cls_session);
-
-       iscsi_host_dec_session_cnt(shost);
        module_put(owner);
 }
+EXPORT_SYMBOL_GPL(iscsi_session_free);
+
+/**
+ * iscsi_session_teardown - destroy session and cls_session
+ * @cls_session: iscsi session
+ */
+void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+{
+       iscsi_session_remove(cls_session);
+       iscsi_session_free(cls_session);
+}
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 
 /**
index bac1114..2b95b45 100644 (file)
@@ -73,8 +73,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
 {
        struct se_session *sess = se_cmd->se_sess;
 
-       assert_spin_locked(&sess->sess_cmd_lock);
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_held(&sess->sess_cmd_lock);
+
        /*
         * If command already reached CMD_T_COMPLETE state within
         * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
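
[Editor's note] The target_core_tmr hunk above swaps the
assert_spin_locked()/irqs_disabled() pair for lockdep_assert_held(). The latter
compiles away entirely without CONFIG_LOCKDEP, and when lockdep is enabled it
verifies that the *current* context holds the lock, whereas assert_spin_locked()
only checks that somebody does. Sketch (hypothetical struct):

    static void my_update(struct my_obj *o)
    {
            lockdep_assert_held(&o->lock);  /* caller must hold o->lock */
            o->state++;
    }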
index 62c0aa5..0a4eaa3 100644 (file)
@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
                                         int trip, int *temp)
 {
        struct int34x_thermal_zone *d = zone->devdata;
-       int i;
+       int i, ret = 0;
 
        if (d->override_ops && d->override_ops->get_trip_temp)
                return d->override_ops->get_trip_temp(zone, trip, temp);
 
+       mutex_lock(&d->trip_mutex);
+
        if (trip < d->aux_trip_nr)
                *temp = d->aux_trips[trip];
        else if (trip == d->crt_trip_id)
@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
                        }
                }
                if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
-                       return -EINVAL;
+                       ret = -EINVAL;
        }
 
-       return 0;
+       mutex_unlock(&d->trip_mutex);
+
+       return ret;
 }
 
 static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
@@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
                                         enum thermal_trip_type *type)
 {
        struct int34x_thermal_zone *d = zone->devdata;
-       int i;
+       int i, ret = 0;
 
        if (d->override_ops && d->override_ops->get_trip_type)
                return d->override_ops->get_trip_type(zone, trip, type);
 
+       mutex_lock(&d->trip_mutex);
+
        if (trip < d->aux_trip_nr)
                *type = THERMAL_TRIP_PASSIVE;
        else if (trip == d->crt_trip_id)
@@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
                        }
                }
                if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
-                       return -EINVAL;
+                       ret = -EINVAL;
        }
 
-       return 0;
+       mutex_unlock(&d->trip_mutex);
+
+       return ret;
 }
 
 static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
@@ -180,6 +188,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
        int trip_cnt = int34x_zone->aux_trip_nr;
        int i;
 
+       mutex_lock(&int34x_zone->trip_mutex);
+
        int34x_zone->crt_trip_id = -1;
        if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
                                             &int34x_zone->crt_temp))
@@ -207,6 +217,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
                int34x_zone->act_trips[i].valid = true;
        }
 
+       mutex_unlock(&int34x_zone->trip_mutex);
+
        return trip_cnt;
 }
 EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
@@ -230,6 +242,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
        if (!int34x_thermal_zone)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&int34x_thermal_zone->trip_mutex);
+
        int34x_thermal_zone->adev = adev;
        int34x_thermal_zone->override_ops = override_ops;
 
@@ -281,6 +295,7 @@ err_thermal_zone:
        acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
        kfree(int34x_thermal_zone->aux_trips);
 err_trip_alloc:
+       mutex_destroy(&int34x_thermal_zone->trip_mutex);
        kfree(int34x_thermal_zone);
        return ERR_PTR(ret);
 }
@@ -292,6 +307,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
        thermal_zone_device_unregister(int34x_thermal_zone->zone);
        acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
        kfree(int34x_thermal_zone->aux_trips);
+       mutex_destroy(&int34x_thermal_zone->trip_mutex);
        kfree(int34x_thermal_zone);
 }
 EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
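
[Editor's note] The int340x conversion above follows the common pattern for
putting a mutex around a function that had several early "return -EINVAL" exits:
collect the result in ret and fall through to a single unlock-and-return tail,
so no path can leave the mutex held. Reduced sketch (hypothetical zone struct):

    static int my_get_trip(struct my_zone *d, int trip, int *temp)
    {
            int ret = 0;

            mutex_lock(&d->trip_mutex);
            if (trip >= d->trip_count)
                    ret = -EINVAL;
            else
                    *temp = d->trips[trip];
            mutex_unlock(&d->trip_mutex);

            return ret;
    }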
index 3b4971d..8f9872a 100644 (file)
@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
        struct thermal_zone_device_ops *override_ops;
        void *priv_data;
        struct acpi_lpat_conversion_table *lpat_table;
+       struct mutex trip_mutex;
 };
 
 struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
index bda61be..3a1c4d3 100644 (file)
@@ -1234,12 +1234,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
         * clock scaling is in progress
         */
        ufshcd_scsi_block_requests(hba);
+       mutex_lock(&hba->wb_mutex);
        down_write(&hba->clk_scaling_lock);
 
        if (!hba->clk_scaling.is_allowed ||
            ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
+               mutex_unlock(&hba->wb_mutex);
                ufshcd_scsi_unblock_requests(hba);
                goto out;
        }
@@ -1251,12 +1253,16 @@ out:
        return ret;
 }
 
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
 {
-       if (writelock)
-               up_write(&hba->clk_scaling_lock);
-       else
-               up_read(&hba->clk_scaling_lock);
+       up_write(&hba->clk_scaling_lock);
+
+       /* Enable Write Booster if we have scaled up else disable it */
+       if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
+               ufshcd_wb_toggle(hba, scale_up);
+
+       mutex_unlock(&hba->wb_mutex);
+
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_release(hba);
 }
@@ -1273,7 +1279,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
        int ret = 0;
-       bool is_writelock = true;
 
        ret = ufshcd_clock_scaling_prepare(hba);
        if (ret)
@@ -1302,15 +1307,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
                }
        }
 
-       /* Enable Write Booster if we have scaled up else disable it */
-       if (ufshcd_enable_wb_if_scaling_up(hba)) {
-               downgrade_write(&hba->clk_scaling_lock);
-               is_writelock = false;
-               ufshcd_wb_toggle(hba, scale_up);
-       }
-
 out_unprepare:
-       ufshcd_clock_scaling_unprepare(hba, is_writelock);
+       ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
        return ret;
 }
 
@@ -6066,9 +6064,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
 
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
+       mutex_lock(&hba->wb_mutex);
        down_write(&hba->clk_scaling_lock);
        hba->clk_scaling.is_allowed = allow;
        up_write(&hba->clk_scaling_lock);
+       mutex_unlock(&hba->wb_mutex);
 }
 
 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
@@ -9793,6 +9793,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize mutex for exception event control */
        mutex_init(&hba->ee_ctrl_mutex);
 
+       mutex_init(&hba->wb_mutex);
        init_rwsem(&hba->clk_scaling_lock);
 
        ufshcd_init_clk_gating(hba);
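
[Editor's note] The ufshcd change above retires the fragile downgrade_write()
trick: the new hba->wb_mutex is taken outside clk_scaling_lock and held across
both the scaling and the Write Booster toggle, so WB state always matches the
last completed scale while the rwsem is released as early as possible. The lock
ordering, reduced to a sketch (do_scale()/toggle_wb() are hypothetical
stand-ins for the real sequence):

    static int my_scale(struct ufs_hba *hba, bool scale_up)
    {
            int err;

            mutex_lock(&hba->wb_mutex);             /* outer: scale + WB    */
            down_write(&hba->clk_scaling_lock);     /* inner: scaling only  */
            err = do_scale(hba, scale_up);
            up_write(&hba->clk_scaling_lock);
            if (!err)
                    toggle_wb(hba, scale_up);       /* still under wb_mutex */
            mutex_unlock(&hba->wb_mutex);
            return err;
    }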
index 23c24fe..2209372 100644 (file)
@@ -1856,24 +1856,33 @@ unwind:
  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
  * hugetlbfs is in use.
  */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
 {
-       struct page *pages;
        int ret, order = get_order(PAGE_SIZE * 2);
+       struct vfio_iova *region;
+       struct page *pages;
+       dma_addr_t start;
 
        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages)
                return;
 
-       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-       if (!ret) {
-               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+       list_for_each_entry(region, regions, list) {
+               start = ALIGN(region->start, PAGE_SIZE * 2);
+               if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+                       continue;
 
-               if (unmapped == PAGE_SIZE)
-                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
-               else
-                       domain->fgsp = true;
+               ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+                               IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+               if (!ret) {
+                       size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+                       if (unmapped == PAGE_SIZE)
+                               iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+                       else
+                               domain->fgsp = true;
+               }
+               break;
        }
 
        __free_pages(pages, order);
@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                }
        }
 
-       vfio_test_domain_fgsp(domain);
+       vfio_test_domain_fgsp(domain, &iova_copy);
 
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
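
[Editor's note] In the vfio hunk above, ALIGN(region->start, PAGE_SIZE * 2)
rounds the region start up to the next two-page boundary before the test
mapping, instead of blindly using IOVA 0, which may fall outside the valid IOVA
ranges. With 4 KiB pages:

    /* ALIGN() rounds up to a power-of-two multiple: */
    dma_addr_t start = ALIGN(0x1001, PAGE_SIZE * 2);    /* -> 0x2000 */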
index 69a1b8c..a2f04a3 100644 (file)
@@ -482,11 +482,12 @@ ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
                 */
                e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
                                                        &tmp_data, 1);
-               if (e_hash == entry->e_hash)
-                       return 0;
-
                /* Still no match - bad */
-               return -EFSCORRUPTED;
+               if (e_hash != entry->e_hash)
+                       return -EFSCORRUPTED;
+
+               /* Let people know about old hash */
+               pr_warn_once("ext4: filesystem with signed xattr name hash");
        }
        return 0;
 }
@@ -3096,7 +3097,7 @@ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
        while (name_len--) {
                hash = (hash << NAME_HASH_SHIFT) ^
                       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
-                      *name++;
+                      (unsigned char)*name++;
        }
        while (value_count--) {
                hash = (hash << VALUE_HASH_SHIFT) ^
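
[Editor's note] The one-character ext4 fix above guards against C's
implementation-defined char signedness: where plain char is signed, a name byte
>= 0x80 sign-extends to a 32-bit value full of leading ones before the XOR,
yielding a different hash than on unsigned-char platforms. A small userspace
illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            char c = (char)0xe9;
            unsigned int h_signed   = 0 ^ c;                /* 0xffffffe9 if char is signed */
            unsigned int h_unsigned = 0 ^ (unsigned char)c; /* always 0x000000e9 */

            printf("%x %x\n", h_signed, h_unsigned);
            return 0;
    }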
index a4850ae..ad67036 100644 (file)
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
+static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
+                                       struct user_namespace *mnt_userns,
+                                       struct inode *inode, int type, bool rcu)
 {
-       struct fuse_conn *fc = get_fuse_conn(inode);
        int size;
        const char *name;
        void *value = NULL;
@@ -25,7 +26,7 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
        if (fuse_is_bad(inode))
                return ERR_PTR(-EIO);
 
-       if (!fc->posix_acl || fc->no_getxattr)
+       if (fc->no_getxattr)
                return NULL;
 
        if (type == ACL_TYPE_ACCESS)
@@ -53,6 +54,46 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
        return acl;
 }
 
+static inline bool fuse_no_acl(const struct fuse_conn *fc,
+                              const struct inode *inode)
+{
+       /*
+        * Refuse interacting with POSIX ACLs for daemons that
+        * don't support FUSE_POSIX_ACL and are not mounted on
+        * the host to retain backwards compatibility.
+        */
+       return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns);
+}
+
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+                              struct dentry *dentry, int type)
+{
+       struct inode *inode = d_inode(dentry);
+       struct fuse_conn *fc = get_fuse_conn(inode);
+
+       if (fuse_no_acl(fc, inode))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       return __fuse_get_acl(fc, mnt_userns, inode, type, false);
+}
+
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+
+       /*
+        * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set
+        * POSIX ACLs without them being used for permission checking by the
+        * vfs. Retain that behavior for backwards compatibility as there are
+        * filesystems that do all permission checking for acls in the daemon
+        * and not in the kernel.
+        */
+       if (!fc->posix_acl)
+               return NULL;
+
+       return __fuse_get_acl(fc, &init_user_ns, inode, type, rcu);
+}
+
 int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                 struct posix_acl *acl, int type)
 {
@@ -64,7 +105,7 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (fuse_is_bad(inode))
                return -EIO;
 
-       if (!fc->posix_acl || fc->no_setxattr)
+       if (fc->no_setxattr || fuse_no_acl(fc, inode))
                return -EOPNOTSUPP;
 
        if (type == ACL_TYPE_ACCESS)
@@ -99,7 +140,13 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                        return ret;
                }
 
-               if (!vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
+               /*
+                * Fuse daemons without FUSE_POSIX_ACL never changed the
+                * passed-through POSIX ACLs. Such daemons don't expect setgid
+                * bits to be stripped.
+                */
+               if (fc->posix_acl &&
+                   !vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
                    !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
                        extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
 
@@ -108,8 +155,15 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
        } else {
                ret = fuse_removexattr(inode, name);
        }
-       forget_all_cached_acls(inode);
-       fuse_invalidate_attr(inode);
+
+       if (fc->posix_acl) {
+               /*
+                * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs
+                * and didn't invalidate attributes. Retain that behavior.
+                */
+               forget_all_cached_acls(inode);
+               fuse_invalidate_attr(inode);
+       }
 
        return ret;
 }
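The user-visible effect of fuse_no_acl(): on an unprivileged (user-namespace) FUSE mount whose daemon never negotiated FUSE_POSIX_ACL, ACL requests are now refused outright instead of being passed through as raw xattrs. A hedged userspace sketch (the mount path is hypothetical):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char buf[256];
		/* hypothetical file on an unprivileged FUSE mount */
		ssize_t ret = getxattr("/mnt/fuse/file",
				       "system.posix_acl_access", buf, sizeof(buf));

		if (ret < 0 && errno == EOPNOTSUPP)
			puts("ACLs refused, as expected for this daemon");
		else
			printf("ret=%zd errno=%s\n", ret, strerror(errno));
		return 0;
	}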
index cd1a071..2725fb5 100644 (file)
@@ -1942,7 +1942,8 @@ static const struct inode_operations fuse_dir_inode_operations = {
        .permission     = fuse_permission,
        .getattr        = fuse_getattr,
        .listxattr      = fuse_listxattr,
-       .get_inode_acl  = fuse_get_acl,
+       .get_inode_acl  = fuse_get_inode_acl,
+       .get_acl        = fuse_get_acl,
        .set_acl        = fuse_set_acl,
        .fileattr_get   = fuse_fileattr_get,
        .fileattr_set   = fuse_fileattr_set,
@@ -1964,7 +1965,8 @@ static const struct inode_operations fuse_common_inode_operations = {
        .permission     = fuse_permission,
        .getattr        = fuse_getattr,
        .listxattr      = fuse_listxattr,
-       .get_inode_acl  = fuse_get_acl,
+       .get_inode_acl  = fuse_get_inode_acl,
+       .get_acl        = fuse_get_acl,
        .set_acl        = fuse_set_acl,
        .fileattr_get   = fuse_fileattr_get,
        .fileattr_set   = fuse_fileattr_set,
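For context on the split: ->get_inode_acl feeds in-kernel permission checking (and may be called during RCU path walk), while ->get_acl serves explicit userspace requests and receives the mount's user namespace. A sketch of the pattern for a hypothetical filesystem (foofs_load_acl is an assumed helper):

	#include <linux/fs.h>
	#include <linux/posix_acl.h>

	static struct posix_acl *foofs_load_acl(struct inode *inode, int type); /* hypothetical */

	/* serves in-kernel permission checks; may run in RCU walk */
	static struct posix_acl *foofs_get_inode_acl(struct inode *inode, int type,
						     bool rcu)
	{
		if (rcu)
			return ERR_PTR(-ECHILD);	/* retry in ref-walk mode */
		return foofs_load_acl(inode, type);
	}

	/* serves explicit userspace requests; gets the mount's user namespace */
	static struct posix_acl *foofs_get_acl(struct user_namespace *mnt_userns,
					       struct dentry *dentry, int type)
	{
		return foofs_load_acl(d_inode(dentry), type);
	}

	static const struct inode_operations foofs_dir_iops = {
		.get_inode_acl	= foofs_get_inode_acl,
		.get_acl	= foofs_get_acl,
	};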
index c673fae..46797a1 100644 (file)
@@ -1264,11 +1264,11 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
 ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
 int fuse_removexattr(struct inode *inode, const char *name);
 extern const struct xattr_handler *fuse_xattr_handlers[];
-extern const struct xattr_handler *fuse_acl_xattr_handlers[];
-extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];
 
 struct posix_acl;
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+                              struct dentry *dentry, int type);
 int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                 struct posix_acl *acl, int type);
 
index 6b3beda..de9b9ec 100644 (file)
@@ -311,7 +311,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                fuse_dax_dontcache(inode, attr->flags);
 }
 
-static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
+                           struct fuse_conn *fc)
 {
        inode->i_mode = attr->mode & S_IFMT;
        inode->i_size = attr->size;
@@ -333,6 +334,12 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
                                   new_decode_dev(attr->rdev));
        } else
                BUG();
+       /*
+        * Ensure that we don't cache ACLs for daemons without FUSE_POSIX_ACL
+        * so that they see exactly the same behavior as before.
+        */
+       if (!fc->posix_acl)
+               inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
 }
 
 static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -372,7 +379,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                if (!inode)
                        return NULL;
 
-               fuse_init_inode(inode, attr);
+               fuse_init_inode(inode, attr, fc);
                get_fuse_inode(inode)->nodeid = nodeid;
                inode->i_flags |= S_AUTOMOUNT;
                goto done;
@@ -388,7 +395,7 @@ retry:
                if (!fc->writeback_cache || !S_ISREG(attr->mode))
                        inode->i_flags |= S_NOCMTIME;
                inode->i_generation = generation;
-               fuse_init_inode(inode, attr);
+               fuse_init_inode(inode, attr, fc);
                unlock_new_inode(inode);
        } else if (fuse_stale_inode(inode, generation, attr)) {
                /* nodeid was reused, any I/O on the old inode should fail */
@@ -1174,7 +1181,6 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
                        if ((flags & FUSE_POSIX_ACL)) {
                                fc->default_permissions = 1;
                                fc->posix_acl = 1;
-                               fm->sb->s_xattr = fuse_acl_xattr_handlers;
                        }
                        if (flags & FUSE_CACHE_SYMLINKS)
                                fc->cache_symlinks = 1;
@@ -1420,13 +1426,6 @@ static void fuse_sb_defaults(struct super_block *sb)
        if (sb->s_user_ns != &init_user_ns)
                sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
        sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
-
-       /*
-        * If we are not in the initial user namespace posix
-        * acls must be translated.
-        */
-       if (sb->s_user_ns != &init_user_ns)
-               sb->s_xattr = fuse_no_acl_xattr_handlers;
 }
 
 static int fuse_fill_super_submount(struct super_block *sb,
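ACL_DONT_CACHE is a sentinel pointer understood by the VFS ACL cache: storing it in i_acl/i_default_acl makes get_cached_acl() treat every lookup as a miss, so each ACL access goes back to the filesystem (here, the FUSE daemon). A minimal sketch of the same trick in a hypothetical filesystem:

	#include <linux/fs.h>
	#include <linux/posix_acl.h>

	/* hypothetical helper: force every ACL lookup back to the fs */
	static void foofs_disable_acl_caching(struct inode *inode)
	{
		inode->i_acl = ACL_DONT_CACHE;
		inode->i_default_acl = ACL_DONT_CACHE;
	}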
index 0d3e717..9fe571a 100644 (file)
@@ -203,27 +203,6 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
        return fuse_setxattr(inode, name, value, size, flags, 0);
 }
 
-static bool no_xattr_list(struct dentry *dentry)
-{
-       return false;
-}
-
-static int no_xattr_get(const struct xattr_handler *handler,
-                       struct dentry *dentry, struct inode *inode,
-                       const char *name, void *value, size_t size)
-{
-       return -EOPNOTSUPP;
-}
-
-static int no_xattr_set(const struct xattr_handler *handler,
-                       struct user_namespace *mnt_userns,
-                       struct dentry *dentry, struct inode *inode,
-                       const char *name, const void *value,
-                       size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
 static const struct xattr_handler fuse_xattr_handler = {
        .prefix = "",
        .get    = fuse_xattr_get,
@@ -234,33 +213,3 @@ const struct xattr_handler *fuse_xattr_handlers[] = {
        &fuse_xattr_handler,
        NULL
 };
-
-const struct xattr_handler *fuse_acl_xattr_handlers[] = {
-       &posix_acl_access_xattr_handler,
-       &posix_acl_default_xattr_handler,
-       &fuse_xattr_handler,
-       NULL
-};
-
-static const struct xattr_handler fuse_no_acl_access_xattr_handler = {
-       .name  = XATTR_NAME_POSIX_ACL_ACCESS,
-       .flags = ACL_TYPE_ACCESS,
-       .list  = no_xattr_list,
-       .get   = no_xattr_get,
-       .set   = no_xattr_set,
-};
-
-static const struct xattr_handler fuse_no_acl_default_xattr_handler = {
-       .name  = XATTR_NAME_POSIX_ACL_DEFAULT,
-       .flags = ACL_TYPE_DEFAULT,
-       .list  = no_xattr_list,
-       .get   = no_xattr_get,
-       .set   = no_xattr_set,
-};
-
-const struct xattr_handler *fuse_no_acl_xattr_handlers[] = {
-       &fuse_no_acl_access_xattr_handler,
-       &fuse_no_acl_default_xattr_handler,
-       &fuse_xattr_handler,
-       NULL
-};
index 7236393..61323de 100644 (file)
@@ -80,6 +80,15 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
        brelse(bd->bd_bh);
 }
 
+static int __gfs2_writepage(struct page *page, struct writeback_control *wbc,
+                      void *data)
+{
+       struct address_space *mapping = data;
+       int ret = mapping->a_ops->writepage(page, wbc);
+       mapping_set_error(mapping, ret);
+       return ret;
+}
+
 /**
  * gfs2_ail1_start_one - Start I/O on a transaction
  * @sdp: The superblock
@@ -131,7 +140,7 @@ __acquires(&sdp->sd_ail_lock)
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
-               ret = filemap_fdatawrite_wbc(mapping, wbc);
+               ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
                if (need_resched()) {
                        blk_finish_plug(plug);
                        cond_resched();
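The point of mapping_set_error() in the new callback is that this writeback pass consumes the ->writepage return value itself, so the failure must be latched on the mapping for a later syncer to pick up. A hedged sketch of the consuming side (hypothetical ->fsync):

	#include <linux/fs.h>

	static int foofs_fsync(struct file *file, loff_t start, loff_t end,
			       int datasync)
	{
		struct address_space *mapping = file->f_mapping;
		int err;

		/* flush and wait; may itself return a writeback error */
		err = filemap_write_and_wait_range(mapping, start, end);
		if (err)
			return err;

		/* report (once) any error latched via mapping_set_error() */
		return file_check_and_advance_wb_err(file);
	}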
index 0ef0703..c0950ed 100644 (file)
@@ -662,6 +662,39 @@ static struct shrinker     nfsd_file_shrinker = {
 };
 
 /**
+ * nfsd_file_cond_queue - conditionally unhash and queue an nfsd_file
+ * @nf: nfsd_file to attempt to queue
+ * @dispose: private list to queue successfully-put objects
+ *
+ * Unhash an nfsd_file, try to get a reference to it, and then put that
+ * reference. If it's the last reference, queue it to the dispose list.
+ */
+static void
+nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+       __must_hold(RCU)
+{
+       int decrement = 1;
+
+       /* If we raced with someone else unhashing, ignore it */
+       if (!nfsd_file_unhash(nf))
+               return;
+
+       /* If we can't get a reference, ignore it */
+       if (!nfsd_file_get(nf))
+               return;
+
+       /* Extra decrement if we remove from the LRU */
+       if (nfsd_file_lru_remove(nf))
+               ++decrement;
+
+       /* If refcount goes to 0, then put on the dispose list */
+       if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+               list_add(&nf->nf_lru, dispose);
+               trace_nfsd_file_closing(nf);
+       }
+}
+
+/**
  * nfsd_file_queue_for_close - try to close out any open nfsd_files for an inode
  * @inode:   inode on which to close out nfsd_files
  * @dispose: list on which to gather nfsd_files to close out
@@ -688,30 +721,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
 
        rcu_read_lock();
        do {
-               int decrement = 1;
-
                nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
                                       nfsd_file_rhash_params);
                if (!nf)
                        break;
-
-               /* If we raced with someone else unhashing, ignore it */
-               if (!nfsd_file_unhash(nf))
-                       continue;
-
-               /* If we can't get a reference, ignore it */
-               if (!nfsd_file_get(nf))
-                       continue;
-
-               /* Extra decrement if we remove from the LRU */
-               if (nfsd_file_lru_remove(nf))
-                       ++decrement;
-
-               /* If refcount goes to 0, then put on the dispose list */
-               if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
-                       list_add(&nf->nf_lru, dispose);
-                       trace_nfsd_file_closing(nf);
-               }
+               nfsd_file_cond_queue(nf, dispose);
        } while (1);
        rcu_read_unlock();
 }
@@ -928,11 +942,8 @@ __nfsd_file_cache_purge(struct net *net)
 
                nf = rhashtable_walk_next(&iter);
                while (!IS_ERR_OR_NULL(nf)) {
-                       if (!net || nf->nf_net == net) {
-                               nfsd_file_unhash(nf);
-                               nfsd_file_lru_remove(nf);
-                               list_add(&nf->nf_lru, &dispose);
-                       }
+                       if (!net || nf->nf_net == net)
+                               nfsd_file_cond_queue(nf, &dispose);
                        nf = rhashtable_walk_next(&iter);
                }
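The helper folds the plain put and the LRU reference into a single refcount_sub_and_test(), so exactly one caller can observe the count reaching zero and queue the file for disposal. A minimal sketch of that pattern (demo names, not nfsd code):

	#include <linux/refcount.h>

	static bool demo_put(refcount_t *ref, bool also_dropped_lru)
	{
		int decrement = 1;

		if (also_dropped_lru)
			decrement++;	/* fold the LRU reference into one sub */

		/* true exactly once, for whoever drops the last reference */
		return refcount_sub_and_test(decrement, ref);
	}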
 
index b111dc7..095370e 100644 (file)
@@ -208,6 +208,18 @@ struct drm_fb_helper {
         * the smem_start field should always be cleared to zero.
         */
        bool hint_leak_smem_start;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+       /**
+        * @fbdefio:
+        *
+        * Temporary storage for the driver's FB deferred I/O handler. If the
+        * driver uses the DRM fbdev emulation layer, this is set by the core
+        * to a generic deferred I/O handler if the driver prefers to use
+        * a shadow buffer.
+        */
+       struct fb_deferred_io fbdefio;
+#endif
 };
 
 static inline struct drm_fb_helper *
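For reference, a rough sketch of what such a deferred-I/O descriptor looks like on the driver side (hypothetical driver; with this change the generic emulation fills the embedded @fbdefio itself): dirty pages are batched for @delay jiffies, then flushed in one ->deferred_io pass.

	#include <linux/fb.h>
	#include <drm/drm_fb_helper.h>

	static struct fb_deferred_io foofs_defio = {	/* hypothetical driver */
		.delay		= HZ / 20,	/* ~50 ms batching window */
		.deferred_io	= drm_fb_helper_deferred_io,
	};
	/* wired up before registration: info->fbdefio = &foofs_defio; */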
index 4f8c352..6c2a2f2 100644 (file)
@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);
 
 int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
 void drm_vma_node_revoke(struct drm_vma_offset_node *node,
                         struct drm_file *tag);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
index ddb10aa..1f68b49 100644 (file)
 #define LINUX_APPLE_GMUX_H
 
 #include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/pnp.h>
 
 #define GMUX_ACPI_HID "APP000B"
 
+/*
+ * gmux port offsets. Many of these are not yet used, but may be used in
+ * the future, and it's useful to have them documented here anyway.
+ */
+#define GMUX_PORT_VERSION_MAJOR                0x04
+#define GMUX_PORT_VERSION_MINOR                0x05
+#define GMUX_PORT_VERSION_RELEASE      0x06
+#define GMUX_PORT_SWITCH_DISPLAY       0x10
+#define GMUX_PORT_SWITCH_GET_DISPLAY   0x11
+#define GMUX_PORT_INTERRUPT_ENABLE     0x14
+#define GMUX_PORT_INTERRUPT_STATUS     0x16
+#define GMUX_PORT_SWITCH_DDC           0x28
+#define GMUX_PORT_SWITCH_EXTERNAL      0x40
+#define GMUX_PORT_SWITCH_GET_EXTERNAL  0x41
+#define GMUX_PORT_DISCRETE_POWER       0x50
+#define GMUX_PORT_MAX_BRIGHTNESS       0x70
+#define GMUX_PORT_BRIGHTNESS           0x74
+#define GMUX_PORT_VALUE                        0xc2
+#define GMUX_PORT_READ                 0xd0
+#define GMUX_PORT_WRITE                        0xd4
+
+#define GMUX_MIN_IO_LEN                        (GMUX_PORT_BRIGHTNESS + 4)
+
 #if IS_ENABLED(CONFIG_APPLE_GMUX)
+static inline bool apple_gmux_is_indexed(unsigned long iostart)
+{
+       u16 val;
+
+       outb(0xaa, iostart + 0xcc);
+       outb(0x55, iostart + 0xcd);
+       outb(0x00, iostart + 0xce);
+
+       val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
+       if (val == 0x55aa)
+               return true;
+
+       return false;
+}
 
 /**
- * apple_gmux_present() - detect if gmux is built into the machine
+ * apple_gmux_detect() - detect if gmux is built into the machine
+ *
+ * @pnp_dev:     Device to probe or NULL to use the first matching device
+ * @indexed_ret: Returns (by reference) whether the gmux uses indexed I/O
+ *
+ * Detect if a supported gmux device is present by actually probing it.
+ * This avoids the false positives returned on some models by
+ * apple_gmux_present().
+ *
+ * Return: %true if a supported gmux ACPI device is detected and the kernel
+ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
+{
+       u8 ver_major, ver_minor, ver_release;
+       struct device *dev = NULL;
+       struct acpi_device *adev;
+       struct resource *res;
+       bool indexed = false;
+       bool ret = false;
+
+       if (!pnp_dev) {
+               adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+               if (!adev)
+                       return false;
+
+               dev = get_device(acpi_get_first_physical_node(adev));
+               acpi_dev_put(adev);
+               if (!dev)
+                       return false;
+
+               pnp_dev = to_pnp_dev(dev);
+       }
+
+       res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
+       if (!res || resource_size(res) < GMUX_MIN_IO_LEN)
+               goto out;
+
+       /*
+        * Invalid version information may indicate either that the gmux
+        * device isn't present or that it's a new one that uses indexed I/O.
+        */
+       ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
+       ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
+       ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
+       if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+               indexed = apple_gmux_is_indexed(res->start);
+               if (!indexed)
+                       goto out;
+       }
+
+       if (indexed_ret)
+               *indexed_ret = indexed;
+
+       ret = true;
+out:
+       put_device(dev);
+       return ret;
+}
+
+/**
+ * apple_gmux_present() - check if gmux ACPI device is present
  *
  * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
  * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
  *
- * Return: %true if gmux is present and the kernel was configured
+ * Return: %true if gmux ACPI device is present and the kernel was configured
  * with CONFIG_APPLE_GMUX, %false otherwise.
  */
 static inline bool apple_gmux_present(void)
@@ -34,6 +134,11 @@ static inline bool apple_gmux_present(void)
        return false;
 }
 
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
+{
+       return false;
+}
+
 #endif /* !CONFIG_APPLE_GMUX */
 
 #endif /* LINUX_APPLE_GMUX_H */
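A usage sketch for the new helper (hypothetical PNP driver): detect the gmux and its register protocol by probing before committing to any setup, instead of trusting the ACPI HID alone.

	#include <linux/errno.h>
	#include <linux/pnp.h>
	#include <linux/printk.h>

	static int foo_gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
	{
		bool indexed;

		if (!apple_gmux_detect(pnp, &indexed))
			return -ENODEV;

		pr_info("gmux present, %s register access\n",
			indexed ? "indexed" : "classic");
		/* ... choose I/O accessors based on 'indexed' ... */
		return 0;
	}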
index b3ba046..56189e4 100644 (file)
@@ -336,9 +336,12 @@ struct gdma_queue_spec {
        };
 };
 
+#define MANA_IRQ_NAME_SZ 32
+
 struct gdma_irq_context {
        void (*handler)(void *arg);
        void *arg;
+       char name[MANA_IRQ_NAME_SZ];
 };
 
 struct gdma_context {
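The reason the buffer is embedded in the context struct: request_irq() stores the name pointer verbatim (e.g. for /proc/interrupts), so it must outlive the IRQ, and a stack-local string would dangle. A hedged sketch of the setup (hypothetical handler and naming scheme):

	#include <linux/interrupt.h>

	static irqreturn_t foo_isr(int irq, void *arg)	/* hypothetical */
	{
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(struct gdma_irq_context *gic, int irq, int queue,
				 void *arg)
	{
		/* 'gic' must outlive the IRQ: request_irq() keeps the pointer */
		snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@irq%d", queue, irq);
		return request_irq(irq, foo_isr, 0, gic->name, arg);
	}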
index 695eebc..e39fb07 100644 (file)
@@ -422,6 +422,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
 extern struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
                    uint16_t, int, int, uint32_t, unsigned int);
+void iscsi_session_remove(struct iscsi_cls_session *cls_session);
+void iscsi_session_free(struct iscsi_cls_session *cls_session);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
index c742469..2d6f80d 100644 (file)
@@ -15,8 +15,7 @@ enum sctp_conntrack {
        SCTP_CONNTRACK_SHUTDOWN_RECD,
        SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
        SCTP_CONNTRACK_HEARTBEAT_SENT,
-       SCTP_CONNTRACK_HEARTBEAT_ACKED,
-       SCTP_CONNTRACK_DATA_SENT,
+       SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
        SCTP_CONNTRACK_MAX
 };
 
index 94e7403..aa805e6 100644 (file)
@@ -94,8 +94,7 @@ enum ctattr_timeout_sctp {
        CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
        CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
        CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
-       CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-       CTA_TIMEOUT_SCTP_DATA_SENT,
+       CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
        __CTA_TIMEOUT_SCTP_MAX
 };
 #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
index 5cf81df..727084c 100644 (file)
@@ -808,6 +808,7 @@ struct ufs_hba_monitor {
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
  * @clk_scaling_lock: used to serialize device commands and clock scaling
  * @desc_size: descriptor sizes reported by device
  * @scsi_block_reqs_cnt: reference counting for scsi block requests
@@ -951,6 +952,7 @@ struct ufs_hba {
        enum bkops_status urgent_bkops_lvl;
        bool is_urgent_bkops_lvl_checked;
 
+       struct mutex wb_mutex;
        struct rw_semaphore clk_scaling_lock;
        unsigned char desc_size[QUERY_DESC_IDN_MAX];
        atomic_t scsi_block_reqs_cnt;
index 48568a0..4ac3fe4 100644 (file)
@@ -2393,7 +2393,8 @@ static bool finished_loading(const char *name)
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
        mod = find_module_all(name, strlen(name), true);
-       ret = !mod || mod->state == MODULE_STATE_LIVE;
+       ret = !mod || mod->state == MODULE_STATE_LIVE
+               || mod->state == MODULE_STATE_GOING;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -2569,20 +2570,35 @@ static int add_unformed_module(struct module *mod)
 
        mod->state = MODULE_STATE_UNFORMED;
 
-again:
        mutex_lock(&module_mutex);
        old = find_module_all(mod->name, strlen(mod->name), true);
        if (old != NULL) {
-               if (old->state != MODULE_STATE_LIVE) {
+               if (old->state == MODULE_STATE_COMING
+                   || old->state == MODULE_STATE_UNFORMED) {
                        /* Wait in case it fails to load. */
                        mutex_unlock(&module_mutex);
                        err = wait_event_interruptible(module_wq,
                                               finished_loading(mod->name));
                        if (err)
                                goto out_unlocked;
-                       goto again;
+
+                       /* The module might have gone in the meantime. */
+                       mutex_lock(&module_mutex);
+                       old = find_module_all(mod->name, strlen(mod->name),
+                                             true);
                }
-               err = -EEXIST;
+
+               /*
+                * We get here only when the same module was already being
+                * loaded. Do not try to load it again right now. This
+                * prevents the long delays caused by serialized module load
+                * failures, which can happen when several devices of the same
+                * type each trigger a load of the same module.
+                */
+               if (old && old->state == MODULE_STATE_LIVE)
+                       err = -EEXIST;
+               else
+                       err = -EBUSY;
                goto out;
        }
        mod_update_bounds(mod);
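The rule the rewritten code follows: after sleeping, the pre-sleep pointer is stale, so the module must be looked up again under module_mutex before choosing between -EEXIST and -EBUSY. A generic sketch of the sleep-then-relookup pattern (all reg_* names are hypothetical):

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/wait.h>

	static DEFINE_MUTEX(reg_lock);
	static DECLARE_WAIT_QUEUE_HEAD(reg_wq);

	static int reg_claim(const char *name)
	{
		struct reg_obj *obj;			/* hypothetical type */
		int err;

		mutex_lock(&reg_lock);
		obj = reg_find(name);			/* hypothetical lookup */
		if (obj && reg_in_flight(obj)) {	/* hypothetical predicate */
			mutex_unlock(&reg_lock);
			err = wait_event_interruptible(reg_wq, reg_settled(name));
			if (err)
				return err;
			mutex_lock(&reg_lock);
			obj = reg_find(name);		/* may be gone or live now */
		}
		err = obj ? -EEXIST : 0;
		mutex_unlock(&reg_lock);
		return err;
	}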
index bb1ee6d..e838feb 100644 (file)
@@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (retval)
                goto out_put_task;
 
+       /*
+        * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+        * alloc_user_cpus_ptr() returns NULL.
+        */
        user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-       if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+       if (user_mask) {
+               cpumask_copy(user_mask, in_mask);
+       } else if (IS_ENABLED(CONFIG_SMP)) {
                retval = -ENOMEM;
                goto out_put_task;
        }
-       cpumask_copy(user_mask, in_mask);
+
        ac = (struct affinity_context){
                .new_mask  = in_mask,
                .user_mask = user_mask,
index c36aa54..0f87369 100644 (file)
@@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
        eenv_task_busy_time(&eenv, p, prev_cpu);
 
        for (; pd; pd = pd->next) {
+               unsigned long util_min = p_util_min, util_max = p_util_max;
                unsigned long cpu_cap, cpu_thermal_cap, util;
                unsigned long cur_delta, max_spare_cap = 0;
                unsigned long rq_util_min, rq_util_max;
-               unsigned long util_min, util_max;
                unsigned long prev_spare_cap = 0;
                int max_spare_cap_cpu = -1;
                unsigned long base_energy;
@@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                eenv.pd_cap = 0;
 
                for_each_cpu(cpu, cpus) {
+                       struct rq *rq = cpu_rq(cpu);
+
                        eenv.pd_cap += cpu_thermal_cap;
 
                        if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
@@ -7269,24 +7271,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                         * much capacity we can get out of the CPU; this is
                         * aligned with sched_cpu_util().
                         */
-                       if (uclamp_is_used()) {
-                               if (uclamp_rq_is_idle(cpu_rq(cpu))) {
-                                       util_min = p_util_min;
-                                       util_max = p_util_max;
-                               } else {
-                                       /*
-                                        * Open code uclamp_rq_util_with() except for
-                                        * the clamp() part. Ie: apply max aggregation
-                                        * only. util_fits_cpu() logic requires to
-                                        * operate on non clamped util but must use the
-                                        * max-aggregated uclamp_{min, max}.
-                                        */
-                                       rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-                                       rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-
-                                       util_min = max(rq_util_min, p_util_min);
-                                       util_max = max(rq_util_max, p_util_max);
-                               }
+                       if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+                               /*
+                                * Open code uclamp_rq_util_with() except for
+                                * the clamp() part. Ie: apply max aggregation
+                                * only. util_fits_cpu() logic requires to
+                                * operate on non clamped util but must use the
+                                * max-aggregated uclamp_{min, max}.
+                                */
+                               rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+                               rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+
+                               util_min = max(rq_util_min, p_util_min);
+                               util_max = max(rq_util_max, p_util_max);
                        }
                        if (!util_fits_cpu(util, util_min, util_max, cpu))
                                continue;
@@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
         *   * Thermal pressure will impact all cpus in this perf domain
         *     equally.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_energy_enabled()) {
                unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-               struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+               struct perf_domain *pd;
 
+               rcu_read_lock();
+
+               pd = rcu_dereference(rq->rd->pd);
                rq->cpu_capacity_inverted = 0;
 
                for (; pd; pd = pd->next) {
                        struct cpumask *pd_span = perf_domain_span(pd);
                        unsigned long pd_cap_orig, pd_cap;
 
+                       /* We can't be inverted against our own pd */
+                       if (cpumask_test_cpu(cpu_of(rq), pd_span))
+                               continue;
+
                        cpu = cpumask_any(pd_span);
                        pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
                                break;
                        }
                }
+
+               rcu_read_unlock();
        }
 
        trace_sched_cpu_capacity_tp(rq);
index 9055e8b..489e15b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include <linux/skbuff.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
        if (type <= 0 || type > maxtype)
                return 0;
 
+       type = array_index_nospec(type, maxtype + 1);
        pt = &policy[type];
 
        BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
                        }
                        continue;
                }
+               type = array_index_nospec(type, maxtype + 1);
                if (policy) {
                        int err = validate_nla(nla, maxtype, policy,
                                               validate, extack, depth);
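The pattern applied in these hunks: once a bounds check could be bypassed by a mispredicted branch, the index is clamped with array_index_nospec() before it is used to address memory, so no out-of-bounds load happens even speculatively. A minimal sketch:

	#include <linux/errno.h>
	#include <linux/nospec.h>

	static int demo_lookup(const int *table, unsigned int idx, unsigned int size)
	{
		if (idx >= size)
			return -EINVAL;

		/* clamp idx to [0, size) even under speculation */
		idx = array_index_nospec(idx, size);
		return table[idx];
	}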
index 5581d22..078a0a4 100644 (file)
@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
                return 0;
 
        if (ops->id && ops->size) {
-cleanup:
                ng = rcu_dereference_protected(net->gen,
                                               lockdep_is_held(&pernet_ops_rwsem));
                ng->ptr[*ops->id] = NULL;
        }
 
+cleanup:
        kfree(data);
 
 out:
index ce9ff3c..3bb890a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/netlink.h>
 #include <linux/hash.h>
+#include <linux/nospec.h>
 
 #include <net/arp.h>
 #include <net/inet_dscp.h>
@@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                if (type > RTAX_MAX)
                        return false;
 
+               type = array_index_nospec(type, RTAX_MAX + 1);
                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];
                        bool ecn_ca = false;
index 7fcfdfd..0e3ee15 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/netlink.h>
+#include <linux/nospec.h>
 #include <linux/rtnetlink.h>
 #include <linux/types.h>
 #include <net/ip.h>
@@ -25,6 +26,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
                        return -EINVAL;
                }
 
+               type = array_index_nospec(type, RTAX_MAX + 1);
                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];
 
index 60fd91b..c314fdd 100644 (file)
@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
                if (proxied > 0) {
-                       hdr->hop_limit--;
+                       /* It's tempting to decrease the hop limit
+                        * here by 1, as we do at the end of the
+                        * function too.
+                        *
+                        * But that would be incorrect, as proxying is
+                        * not forwarding.  The ip6_input function
+                        * will handle this packet locally, and it
+                        * depends on the hop limit being unchanged.
+                        *
+                        * One example is the NDP hop limit, which
+                        * always has to stay 255; others would be the
+                        * similar checks around RA packets, where the
+                        * user can even change the desired limit.
+                        */
                        return ip6_input(skb);
                } else if (proxied < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
index fc9e728..45bbe3e 100644 (file)
@@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk)
 
 static void mctp_sk_close(struct sock *sk, long timeout)
 {
-       struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
-
-       del_timer_sync(&msk->key_expiry);
        sk_common_release(sk);
 }
 
@@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk)
                spin_lock_irqsave(&key->lock, fl2);
                __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
        }
+       sock_set_flag(sk, SOCK_DEAD);
        spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+       /* Since there are no more tag allocations (we have removed all of the
+        * keys), stop any pending expiry events. The timer cannot be re-queued
+        * as the sk is no longer observable.
+        */
+       del_timer_sync(&msk->key_expiry);
 }
 
 static struct proto mctp_proto = {
index f9a80b8..f51a05e 100644 (file)
@@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
        key->valid = true;
        spin_lock_init(&key->lock);
        refcount_set(&key->refs, 1);
+       sock_hold(key->sk);
 
        return key;
 }
@@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
        mctp_dev_release_key(key->dev, key);
        spin_unlock_irqrestore(&key->lock, flags);
 
+       sock_put(key->sk);
        kfree(key);
 }
 
@@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
 
        spin_lock_irqsave(&net->mctp.keys_lock, flags);
 
+       if (sock_flag(&msk->sk, SOCK_DEAD)) {
+               rc = -EINVAL;
+               goto out_unlock;
+       }
+
        hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
                if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
                                   key->tag)) {
@@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
                hlist_add_head(&key->sklist, &msk->keys);
        }
 
+out_unlock:
        spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
 
        return rc;
@@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 
 static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
 {
+       struct mctp_sk_key *key, *any_key = NULL;
        struct net *net = dev_net(skb->dev);
-       struct mctp_sk_key *key;
        struct mctp_sock *msk;
        struct mctp_hdr *mh;
        unsigned long f;
@@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
                         * key for reassembly - we'll create a more specific
                         * one for future packets if required (ie, !EOM).
                         */
-                       key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
-                       if (key) {
-                               msk = container_of(key->sk,
+                       any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+                       if (any_key) {
+                               msk = container_of(any_key->sk,
                                                   struct mctp_sock, sk);
-                               spin_unlock_irqrestore(&key->lock, f);
-                               mctp_key_unref(key);
-                               key = NULL;
+                               spin_unlock_irqrestore(&any_key->lock, f);
                        }
                }
 
@@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
                         * this function.
                         */
                        rc = mctp_key_add(key, msk);
-                       if (rc) {
-                               kfree(key);
-                       } else {
+                       if (!rc)
                                trace_mctp_key_acquire(key);
 
-                               /* we don't need to release key->lock on exit */
-                               mctp_key_unref(key);
-                       }
+                       /* we don't need to release key->lock on exit, so
+                        * clean up here and suppress the unlock by
+                        * setting key to NULL
+                        */
+                       mctp_key_unref(key);
                        key = NULL;
 
                } else {
@@ -473,6 +479,8 @@ out_unlock:
                spin_unlock_irqrestore(&key->lock, f);
                mctp_key_unref(key);
        }
+       if (any_key)
+               mctp_key_unref(any_key);
 out:
        if (rc)
                kfree_skb(skb);
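The new sock_hold()/sock_put() pair establishes a simple lifetime rule: a key owns a reference on its socket for as long as the key exists, so key->sk can always be dereferenced by key holders without racing against socket free. A minimal sketch of the same ownership pattern (demo types, not mctp code):

	#include <linux/refcount.h>
	#include <linux/slab.h>
	#include <net/sock.h>

	struct demo_key {
		struct sock *sk;
		refcount_t refs;
	};

	static struct demo_key *demo_key_alloc(struct sock *sk)
	{
		struct demo_key *key = kzalloc(sizeof(*key), GFP_KERNEL);

		if (!key)
			return NULL;
		refcount_set(&key->refs, 1);
		key->sk = sk;
		sock_hold(sk);		/* the key owns a sock reference */
		return key;
	}

	static void demo_key_put(struct demo_key *key)
	{
		if (refcount_dec_and_test(&key->refs)) {
			sock_put(key->sk);	/* drop the paired reference */
			kfree(key);
		}
	}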
index d88b92a..945dd40 100644 (file)
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_timeout.h>
 
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
-   closely.  They're more complex. --RR
-
-   And so for me for SCTP :D -Kiran */
-
 static const char *const sctp_conntrack_names[] = {
-       "NONE",
-       "CLOSED",
-       "COOKIE_WAIT",
-       "COOKIE_ECHOED",
-       "ESTABLISHED",
-       "SHUTDOWN_SENT",
-       "SHUTDOWN_RECD",
-       "SHUTDOWN_ACK_SENT",
-       "HEARTBEAT_SENT",
-       "HEARTBEAT_ACKED",
+       [SCTP_CONNTRACK_NONE]                   = "NONE",
+       [SCTP_CONNTRACK_CLOSED]                 = "CLOSED",
+       [SCTP_CONNTRACK_COOKIE_WAIT]            = "COOKIE_WAIT",
+       [SCTP_CONNTRACK_COOKIE_ECHOED]          = "COOKIE_ECHOED",
+       [SCTP_CONNTRACK_ESTABLISHED]            = "ESTABLISHED",
+       [SCTP_CONNTRACK_SHUTDOWN_SENT]          = "SHUTDOWN_SENT",
+       [SCTP_CONNTRACK_SHUTDOWN_RECD]          = "SHUTDOWN_RECD",
+       [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]      = "SHUTDOWN_ACK_SENT",
+       [SCTP_CONNTRACK_HEARTBEAT_SENT]         = "HEARTBEAT_SENT",
 };
 
 #define SECS  * HZ
@@ -54,13 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
        [SCTP_CONNTRACK_CLOSED]                 = 10 SECS,
        [SCTP_CONNTRACK_COOKIE_WAIT]            = 3 SECS,
        [SCTP_CONNTRACK_COOKIE_ECHOED]          = 3 SECS,
-       [SCTP_CONNTRACK_ESTABLISHED]            = 5 DAYS,
+       [SCTP_CONNTRACK_ESTABLISHED]            = 210 SECS,
        [SCTP_CONNTRACK_SHUTDOWN_SENT]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_RECD]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]      = 3 SECS,
        [SCTP_CONNTRACK_HEARTBEAT_SENT]         = 30 SECS,
-       [SCTP_CONNTRACK_HEARTBEAT_ACKED]        = 210 SECS,
-       [SCTP_CONNTRACK_DATA_SENT]              = 30 SECS,
 };
 
 #define        SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
@@ -74,8 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 #define        sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define        sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
 #define        sHS SCTP_CONNTRACK_HEARTBEAT_SENT
-#define        sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
-#define        sDS SCTP_CONNTRACK_DATA_SENT
 #define        sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -98,10 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
 CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
                    the SHUTDOWN chunk. Connection is closed.
 HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
-HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK/DATA/SACK in the direction
-                   opposite to that of the HEARTBEAT/DATA chunk. Secondary connection
-                   is established.
-DATA_SENT         - We have seen a DATA/SACK in a new flow.
 */
 
 /* TODO
@@ -115,38 +101,36 @@ cookie echoed to closed.
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][12][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA, sCW},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS, sCL},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA, sCL},
-/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* data/sack    */ {sDS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
        },
        {
 /*     REPLY   */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL, sIV},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR, sIV},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA, sIV},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA, sIV},
-/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sHA},
-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
-/* data/sack    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
        }
 };
 
@@ -160,8 +144,8 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)    \
 for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0;       \
-       (offset) < (skb)->len &&                                        \
-       ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));   \
+       ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
+       (sch)->length;  \
        (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
 
 /* Some validity checks to make sure the chunks are fine */
@@ -258,11 +242,6 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
                pr_debug("SCTP_CID_HEARTBEAT_ACK");
                i = 10;
                break;
-       case SCTP_CID_DATA:
-       case SCTP_CID_SACK:
-               pr_debug("SCTP_CID_DATA/SACK");
-               i = 11;
-               break;
        default:
                /* Other chunks like DATA or SACK do not change the state */
                pr_debug("Unknown chunk type, will stay in %s\n",
@@ -316,9 +295,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                                 ih->init_tag);
 
                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
-               } else if (sch->type == SCTP_CID_HEARTBEAT ||
-                          sch->type == SCTP_CID_DATA ||
-                          sch->type == SCTP_CID_SACK) {
+               } else if (sch->type == SCTP_CID_HEARTBEAT) {
                        pr_debug("Setting vtag %x for secondary conntrack\n",
                                 sh->vtag);
                        ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
@@ -404,19 +381,19 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 
                if (!sctp_new(ct, skb, sh, dataoff))
                        return -NF_ACCEPT;
-       } else {
-               /* Check the verification tag (Sec 8.5) */
-               if (!test_bit(SCTP_CID_INIT, map) &&
-                   !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
-                   !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
-                   !test_bit(SCTP_CID_ABORT, map) &&
-                   !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
-                   !test_bit(SCTP_CID_HEARTBEAT, map) &&
-                   !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
-                   sh->vtag != ct->proto.sctp.vtag[dir]) {
-                       pr_debug("Verification tag check failed\n");
-                       goto out;
-               }
+       }
+
+       /* Check the verification tag (Sec 8.5) */
+       if (!test_bit(SCTP_CID_INIT, map) &&
+           !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
+           !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
+           !test_bit(SCTP_CID_ABORT, map) &&
+           !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
+           sh->vtag != ct->proto.sctp.vtag[dir]) {
+               pr_debug("Verification tag check failed\n");
+               goto out;
        }
 
        old_state = new_state = SCTP_CONNTRACK_NONE;
@@ -424,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
        for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
                /* Special cases of Verification tag check (Sec 8.5.1) */
                if (sch->type == SCTP_CID_INIT) {
-                       /* Sec 8.5.1 (A) */
+                       /* (A) vtag MUST be zero */
                        if (sh->vtag != 0)
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_ABORT) {
-                       /* Sec 8.5.1 (B) */
-                       if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-                           sh->vtag != ct->proto.sctp.vtag[!dir])
+                       /* (B) vtag MUST match own vtag if T flag is unset OR
+                        * MUST match peer's vtag if T flag is set
+                        */
+                       if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[dir]) ||
+                           ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[!dir]))
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-                       /* Sec 8.5.1 (C) */
-                       if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-                           sh->vtag != ct->proto.sctp.vtag[!dir] &&
-                           sch->flags & SCTP_CHUNK_FLAG_T)
+                       /* (C) vtag MUST match own vtag if T flag is unset OR
+                        * MUST match peer's vtag if T flag is set
+                        */
+                       if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[dir]) ||
+                           ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[!dir]))
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-                       /* Sec 8.5.1 (D) */
+                       /* (D) vtag must be the same as the init_vtag found in the INIT_ACK */
                        if (sh->vtag != ct->proto.sctp.vtag[dir])
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_HEARTBEAT) {
@@ -476,11 +460,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
                        } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
                                ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
                        }
-               } else if (sch->type == SCTP_CID_DATA || sch->type == SCTP_CID_SACK) {
-                       if (ct->proto.sctp.vtag[dir] == 0) {
-                               pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir);
-                               ct->proto.sctp.vtag[dir] = sh->vtag;
-                       }
                }
 
                old_state = ct->proto.sctp.state;
@@ -518,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
                }
 
                ct->proto.sctp.state = new_state;
-               if (old_state != new_state)
+               if (old_state != new_state) {
                        nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+                       if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+                           !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+                               nf_conntrack_event_cache(IPCT_ASSURED, ct);
+               }
        }
        spin_unlock_bh(&ct->lock);
 
@@ -533,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
-       if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
-           dir == IP_CT_DIR_REPLY &&
-           new_state == SCTP_CONNTRACK_ESTABLISHED) {
-               pr_debug("Setting assured bit\n");
-               set_bit(IPS_ASSURED_BIT, &ct->status);
-               nf_conntrack_event_cache(IPCT_ASSURED, ct);
-       }
-
        return NF_ACCEPT;
 
 out_unlock:
@@ -701,7 +676,6 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]    = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
-       [CTA_TIMEOUT_SCTP_DATA_SENT]            = { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
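Switching the name table to designated initializers is what makes removing enum entries safe: each slot tracks its enum value instead of its position, and unnamed slots become NULL rather than silently shifting every later name. A minimal sketch of the idiom:

	enum demo_state { ST_NONE, ST_OPEN, ST_CLOSED, ST_MAX };

	static const char *const demo_names[ST_MAX] = {
		[ST_NONE]	= "NONE",
		[ST_OPEN]	= "OPEN",
		[ST_CLOSED]	= "CLOSED",
	};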
 
index 0250725..460294b 100644 (file)
@@ -601,8 +601,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
        NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
@@ -887,18 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
-               .procname       = "nf_conntrack_sctp_timeout_heartbeat_acked",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT] = {
-               .procname       = "nf_conntrack_sctp_timeout_data_sent",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
        [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
@@ -1042,8 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
        XASSIGN(SHUTDOWN_RECD, sn);
        XASSIGN(SHUTDOWN_ACK_SENT, sn);
        XASSIGN(HEARTBEAT_SENT, sn);
-       XASSIGN(HEARTBEAT_ACKED, sn);
-       XASSIGN(DATA_SENT, sn);
 #undef XASSIGN
 #endif
 }
index 7325bee..19ea4d3 100644 (file)
@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
        return !nft_rbtree_interval_end(rbe);
 }
 
-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
-                            const struct nft_rbtree_elem *interval)
+static int nft_rbtree_cmp(const struct nft_set *set,
+                         const struct nft_rbtree_elem *e1,
+                         const struct nft_rbtree_elem *e2)
 {
-       return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+       return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
+                     set->klen);
 }
 
 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
-       const void *this;
        int d;
 
        parent = rcu_dereference_raw(priv->root.rb_node);
@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-               this = nft_set_ext_key(&rbe->ext);
-               d = memcmp(this, key, set->klen);
+               d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (interval &&
-                           nft_rbtree_equal(set, this, interval) &&
+                           !nft_rbtree_cmp(set, rbe, interval) &&
                            nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(interval))
                                continue;
@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
        return rbe;
 }
 
+static int nft_rbtree_gc_elem(const struct nft_set *__set,
+                             struct nft_rbtree *priv,
+                             struct nft_rbtree_elem *rbe)
+{
+       struct nft_set *set = (struct nft_set *)__set;
+       struct rb_node *prev = rb_prev(&rbe->node);
+       struct nft_rbtree_elem *rbe_prev;
+       struct nft_set_gc_batch *gcb;
+
+       gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+       if (!gcb)
+               return -ENOMEM;
+
+       /* search for expired end interval coming before this element. */
+       do {
+               rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (nft_rbtree_interval_end(rbe_prev))
+                       break;
+
+               prev = rb_prev(prev);
+       } while (prev != NULL);
+
+       rb_erase(&rbe_prev->node, &priv->root);
+       rb_erase(&rbe->node, &priv->root);
+       atomic_sub(2, &set->nelems);
+
+       nft_set_gc_batch_add(gcb, rbe);
+       nft_set_gc_batch_complete(gcb);
+
+       return 0;
+}
+
+static bool nft_rbtree_update_first(const struct nft_set *set,
+                                   struct nft_rbtree_elem *rbe,
+                                   struct rb_node *first)
+{
+       struct nft_rbtree_elem *first_elem;
+
+       first_elem = rb_entry(first, struct nft_rbtree_elem, node);
+       /* this element is closest to where the new element is to be inserted:
+        * update the first element to start the node walk from.
+        */
+       if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
+               return true;
+
+       return false;
+}
+
 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_rbtree_elem *new,
                               struct nft_set_ext **ext)
 {
-       bool overlap = false, dup_end_left = false, dup_end_right = false;
+       struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+       struct rb_node *node, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
-       struct nft_rbtree_elem *rbe;
-       struct rb_node *parent, **p;
-       int d;
+       int d, err;
 
-       /* Detect overlaps as we descend the tree. Set the flag in these cases:
-        *
-        * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
-        * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
-        * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
-        *
-        * and clear it later on, as we eventually reach the points indicated by
-        * '?' above, in the cases described below. We'll always meet these
-        * later, locally, due to tree ordering, and overlaps for the intervals
-        * that are the closest together are always evaluated last.
-        *
-        * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
-        * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
-        * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
-        *            '--' no nodes falling in this range
-        * b4.          >|_ _   !  (insert start before existing start)
-        *
-        * Case a3. resolves to b3.:
-        * - if the inserted start element is the leftmost, because the '0'
-        *   element in the tree serves as end element
-        * - otherwise, if an existing end is found immediately to the left. If
-        *   there are existing nodes in between, we need to further descend the
-        *   tree before we can conclude the new start isn't causing an overlap
-        *
-        * or to b4., which, preceded by a3., means we already traversed one or
-        * more existing intervals entirely, from the right.
-        *
-        * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
-        * in that order.
-        *
-        * The flag is also cleared in two special cases:
-        *
-        * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
-        * b6. |__ _ >|!__ _ _   (insert end right after existing start)
-        *
-        * which always happen as last step and imply that no further
-        * overlapping is possible.
-        *
-        * Another special case comes from the fact that start elements matching
-        * an already existing start element are allowed: insertion is not
-        * performed but we return -EEXIST in that case, and the error will be
-        * cleared by the caller if NLM_F_EXCL is not present in the request.
-        * This way, request for insertion of an exact overlap isn't reported as
-        * error to userspace if not desired.
-        *
-        * However, if the existing start matches a pre-existing start, but the
-        * end element doesn't match the corresponding pre-existing end element,
-        * we need to report a partial overlap. This is a local condition that
-        * can be noticed without need for a tracking flag, by checking for a
-        * local duplicated end for a corresponding start, from left and right,
-        * separately.
+       /* Descend the tree to search for an existing element greater than the
+        * new element's key value. This is the first element from which the
+        * walk over the ordered elements starts, to detect a possible overlap.
         */
-
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-               d = memcmp(nft_set_ext_key(&rbe->ext),
-                          nft_set_ext_key(&new->ext),
-                          set->klen);
+               d = nft_rbtree_cmp(set, rbe, new);
+
                if (d < 0) {
                        p = &parent->rb_left;
-
-                       if (nft_rbtree_interval_start(new)) {
-                               if (nft_rbtree_interval_end(rbe) &&
-                                   nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext) && !*p)
-                                       overlap = false;
-                       } else {
-                               if (dup_end_left && !*p)
-                                       return -ENOTEMPTY;
-
-                               overlap = nft_rbtree_interval_end(rbe) &&
-                                         nft_set_elem_active(&rbe->ext,
-                                                             genmask) &&
-                                         !nft_set_elem_expired(&rbe->ext);
-
-                               if (overlap) {
-                                       dup_end_right = true;
-                                       continue;
-                               }
-                       }
                } else if (d > 0) {
-                       p = &parent->rb_right;
+                       if (!first ||
+                           nft_rbtree_update_first(set, rbe, first))
+                               first = &rbe->node;
 
-                       if (nft_rbtree_interval_end(new)) {
-                               if (dup_end_right && !*p)
-                                       return -ENOTEMPTY;
-
-                               overlap = nft_rbtree_interval_end(rbe) &&
-                                         nft_set_elem_active(&rbe->ext,
-                                                             genmask) &&
-                                         !nft_set_elem_expired(&rbe->ext);
-
-                               if (overlap) {
-                                       dup_end_left = true;
-                                       continue;
-                               }
-                       } else if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                  !nft_set_elem_expired(&rbe->ext)) {
-                               overlap = nft_rbtree_interval_end(rbe);
-                       }
+                       p = &parent->rb_right;
                } else {
-                       if (nft_rbtree_interval_end(rbe) &&
-                           nft_rbtree_interval_start(new)) {
+                       if (nft_rbtree_interval_end(rbe))
                                p = &parent->rb_left;
-
-                               if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext))
-                                       overlap = false;
-                       } else if (nft_rbtree_interval_start(rbe) &&
-                                  nft_rbtree_interval_end(new)) {
+                       else
                                p = &parent->rb_right;
+               }
+       }
+
+       if (!first)
+               first = rb_first(&priv->root);
+
+       /* Detect overlap by walking the ordered list of valid tree nodes.
+        * Values stored in the tree are in reversed order, from highest to
+        * lowest value.
+        */
+       for (node = first; node != NULL; node = rb_next(node)) {
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
-                               if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext))
-                                       overlap = false;
-                       } else if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                  !nft_set_elem_expired(&rbe->ext)) {
-                               *ext = &rbe->ext;
-                               return -EEXIST;
-                       } else {
-                               overlap = false;
-                               if (nft_rbtree_interval_end(rbe))
-                                       p = &parent->rb_left;
-                               else
-                                       p = &parent->rb_right;
+               if (!nft_set_elem_active(&rbe->ext, genmask))
+                       continue;
+
+               /* perform garbage collection to avoid bogus overlap reports. */
+               if (nft_set_elem_expired(&rbe->ext)) {
+                       err = nft_rbtree_gc_elem(set, priv, rbe);
+                       if (err < 0)
+                               return err;
+
+                       continue;
+               }
+
+               d = nft_rbtree_cmp(set, rbe, new);
+               if (d == 0) {
+                       /* Matching end element: no need to look for an
+                        * overlapping greater or equal element.
+                        */
+                       if (nft_rbtree_interval_end(rbe)) {
+                               rbe_le = rbe;
+                               break;
+                       }
+
+                       /* first element that is greater than or equal to the key value. */
+                       if (!rbe_ge) {
+                               rbe_ge = rbe;
+                               continue;
+                       }
+
+                       /* this is a closer greater-or-equal element, update it. */
+                       if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
+                               rbe_ge = rbe;
+                               continue;
+                       }
+
+                       /* element is equal to the key value: make sure the
+                        * flags match, so that an existing greater-or-equal
+                        * start element is not replaced by a greater-or-equal
+                        * end element.
+                        */
+                       if ((nft_rbtree_interval_start(new) &&
+                            nft_rbtree_interval_start(rbe_ge)) ||
+                           (nft_rbtree_interval_end(new) &&
+                            nft_rbtree_interval_end(rbe_ge))) {
+                               rbe_ge = rbe;
+                               continue;
                        }
+               } else if (d > 0) {
+                       /* annotate element greater than the new element. */
+                       rbe_ge = rbe;
+                       continue;
+               } else if (d < 0) {
+                       /* annotate element less than the new element. */
+                       rbe_le = rbe;
+                       break;
                }
+       }
 
-               dup_end_left = dup_end_right = false;
+       /* - new start element matching existing start element: full overlap
+        *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+        */
+       if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
+           nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
+               *ext = &rbe_ge->ext;
+               return -EEXIST;
        }
 
-       if (overlap)
+       /* - new end element matching existing end element: full overlap
+        *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+        */
+       if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
+           nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
+               *ext = &rbe_le->ext;
+               return -EEXIST;
+       }
+
+       /* - new start element with existing closest, less or equal key value
+        *   being a start element: partial overlap, reported as -ENOTEMPTY.
+        *   Anonymous sets allow for two consecutive start elements since they
+        *   are constant; skip them to avoid bogus overlap reports.
+        */
+       if (!nft_set_is_anonymous(set) && rbe_le &&
+           nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
+               return -ENOTEMPTY;
+
+       /* - new end element with existing closest, less or equal key value
+        *   being an end element: partial overlap, reported as -ENOTEMPTY.
+        */
+       if (rbe_le &&
+           nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;
 
+       /* - new end element with existing closest, greater or equal key value
+        *   being an end element: partial overlap, reported as -ENOTEMPTY
+        */
+       if (rbe_ge &&
+           nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
+               return -ENOTEMPTY;
+
+       /* Accepted element: pick insertion point depending on key value */
+       parent = NULL;
+       p = &priv->root.rb_node;
+       while (*p != NULL) {
+               parent = *p;
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+               d = nft_rbtree_cmp(set, rbe, new);
+
+               if (d < 0)
+                       p = &parent->rb_left;
+               else if (d > 0)
+                       p = &parent->rb_right;
+               else if (nft_rbtree_interval_end(rbe))
+                       p = &parent->rb_left;
+               else
+                       p = &parent->rb_right;
+       }
+
        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
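
The checks above reduce to comparing the new boundary against the closest
less-or-equal (rbe_le) and greater-or-equal (rbe_ge) boundaries found during
the walk. A hedged userspace sketch of that classification, using toy types
instead of the kernel structures and omitting the anonymous-set exception:

#include <stdbool.h>
#include <stdio.h>

/* Toy interval boundary: a key plus a start/end marker. */
struct boundary {
        unsigned int key;
        bool is_end;
};

static const char *classify(const struct boundary *elem,
                            const struct boundary *le,
                            const struct boundary *ge)
{
        if (ge && elem->key == ge->key && elem->is_end == ge->is_end)
                return "-EEXIST: full overlap with an existing boundary";
        if (le && elem->key == le->key && elem->is_end == le->is_end)
                return "-EEXIST: full overlap with an existing boundary";
        if (le && !le->is_end && !elem->is_end)
                return "-ENOTEMPTY: start element follows a start element";
        if (le && le->is_end && elem->is_end)
                return "-ENOTEMPTY: end element follows an end element";
        if (ge && ge->is_end && elem->is_end)
                return "-ENOTEMPTY: end element runs into an existing end";
        return "no overlap, accept the element";
}

int main(void)
{
        struct boundary le = { .key = 10, .is_end = false };
        struct boundary elem = { .key = 15, .is_end = false };

        /* A start element whose closest smaller neighbour is also a
         * start element: partial overlap.
         */
        printf("%s\n", classify(&elem, &le, NULL));
        return 0;
}

Two consecutive boundaries of the same kind around the insertion point are
exactly what the -ENOTEMPTY cases above report.
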
@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
        struct nft_rbtree *priv;
        struct rb_node *node;
        struct nft_set *set;
+       struct net *net;
+       u8 genmask;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
+       net  = read_pnet(&set->net);
+       genmask = nft_genmask_cur(net);
 
        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
+               if (!nft_set_elem_active(&rbe->ext, genmask))
+                       continue;
+
+               /* elements are reversed in the rbtree for historical reasons,
+                * from highest to lowest value; that is why the end element is
+                * always visited before the start element.
+                */
                if (nft_rbtree_interval_end(rbe)) {
                        rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
                        continue;
-               if (nft_set_elem_mark_busy(&rbe->ext))
+
+               if (nft_set_elem_mark_busy(&rbe->ext)) {
+                       rbe_end = NULL;
                        continue;
+               }
 
                if (rbe_prev) {
                        rb_erase(&rbe_prev->node, &priv->root);
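
Note the deferred erase: rbe_prev is only removed on a later iteration,
because the walk must never destroy the node it still needs to find the
successor. The same constraint in a runnable toy, with a singly linked list
standing in for the kernel rbtree:

#include <stdio.h>
#include <stdlib.h>

/* Toy gc walk: never free the node still needed to reach its successor. */
struct node {
        int expired;
        struct node *next;
};

static struct node *gc_walk(struct node *head)
{
        struct node **link = &head, *n;

        while ((n = *link) != NULL) {
                if (n->expired) {
                        *link = n->next;        /* take the successor first */
                        free(n);                /* only then free the node */
                } else {
                        link = &n->next;
                }
        }
        return head;
}

int main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        a->next = b;
        b->expired = 1;
        a = gc_walk(a);
        printf("expired node was %s\n", a->next ? "kept" : "dropped");
        free(a);
        return 0;
}
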
index bca2a47..c642776 100644 (file)
@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
        if (nlk_sk(sk)->bound)
                goto err;
 
-       nlk_sk(sk)->portid = portid;
+       /* portid can be read locklessly from netlink_getname(). */
+       WRITE_ONCE(nlk_sk(sk)->portid, portid);
+
        sock_hold(sk);
 
        err = __netlink_insert(table, sk);
@@ -1096,9 +1098,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        if (addr->sa_family == AF_UNSPEC) {
-               sk->sk_state    = NETLINK_UNCONNECTED;
-               nlk->dst_portid = 0;
-               nlk->dst_group  = 0;
+               /* paired with READ_ONCE() in netlink_getsockbyportid() */
+               WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+               /* dst_portid and dst_group can be read locklessly */
+               WRITE_ONCE(nlk->dst_portid, 0);
+               WRITE_ONCE(nlk->dst_group, 0);
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
@@ -1119,9 +1123,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                err = netlink_autobind(sock);
 
        if (err == 0) {
-               sk->sk_state    = NETLINK_CONNECTED;
-               nlk->dst_portid = nladdr->nl_pid;
-               nlk->dst_group  = ffs(nladdr->nl_groups);
+               /* paired with READ_ONCE() in netlink_getsockbyportid() */
+               WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+               /* dst_portid and dst_group can be read locklessly */
+               WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+               WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
        }
 
        return err;
@@ -1138,10 +1144,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
        nladdr->nl_pad = 0;
 
        if (peer) {
-               nladdr->nl_pid = nlk->dst_portid;
-               nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+               /* Paired with WRITE_ONCE() in netlink_connect() */
+               nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+               nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
        } else {
-               nladdr->nl_pid = nlk->portid;
+               /* Paired with WRITE_ONCE() in netlink_insert() */
+               nladdr->nl_pid = READ_ONCE(nlk->portid);
                netlink_lock_table();
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
                netlink_unlock_table();
@@ -1168,8 +1176,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 
        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
-       if (sock->sk_state == NETLINK_CONNECTED &&
-           nlk->dst_portid != nlk_sk(ssk)->portid) {
+       /* dst_portid and sk_state can be changed in netlink_connect() */
+       if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+           READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
@@ -1886,8 +1895,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out;
                netlink_skb_flags |= NETLINK_SKB_DST;
        } else {
-               dst_portid = nlk->dst_portid;
-               dst_group = nlk->dst_group;
+               /* Paired with WRITE_ONCE() in netlink_connect() */
+               dst_portid = READ_ONCE(nlk->dst_portid);
+               dst_group = READ_ONCE(nlk->dst_group);
        }
 
        /* Paired with WRITE_ONCE() in netlink_insert() */
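
All of these annotations pair a locked writer with lockless readers:
WRITE_ONCE()/READ_ONCE() forbid load/store tearing and compiler refetches, but
add no memory ordering. In userspace terms they roughly correspond to relaxed
atomics, as in this sketch (C11 atomics standing in for the kernel macros, not
the netlink code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t dst_portid;

/* Writer side: in the kernel this runs under the socket lock. */
static void set_peer(uint32_t portid)
{
        /* kernel: WRITE_ONCE(nlk->dst_portid, portid); */
        atomic_store_explicit(&dst_portid, portid, memory_order_relaxed);
}

/* Reader side: may run locklessly, e.g. from getname() or sendmsg(). */
static uint32_t get_peer(void)
{
        /* kernel: READ_ONCE(nlk->dst_portid); */
        return atomic_load_explicit(&dst_portid, memory_order_relaxed);
}

int main(void)
{
        set_peer(4242);
        printf("peer portid: %u\n", (unsigned)get_peer());
        return 0;
}
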
index a8da88d..4e7c968 100644 (file)
@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (sock_flag(sk, SOCK_DESTROY) ||
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+                       sock_hold(sk);
                        bh_unlock_sock(sk);
                        nr_destroy_socket(sk);
                        goto out;
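
The added sock_hold() pins the socket before the lock is dropped, so the
destroy path cannot free memory that is still in use by the timer handler. A
toy userspace analogue of the hold/put discipline (hypothetical obj type; the
kernel uses sock_hold()/sock_put()):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
};

static void obj_hold(struct obj *o)
{
        o->refs++;
}

static void obj_put(struct obj *o)
{
        if (--o->refs == 0) {
                printf("object freed\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        o->refs = 1;    /* creation reference */
        obj_hold(o);    /* temporary reference across the unsafe window */
        obj_put(o);     /* destroy path drops the creation reference... */
        obj_put(o);     /* ...our hold kept the object alive until here */
        return 0;
}
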
index 9a11a49..c322a61 100644 (file)
@@ -1700,7 +1700,6 @@ static void taprio_reset(struct Qdisc *sch)
        int i;
 
        hrtimer_cancel(&q->advance_timer);
-       qdisc_synchronize(sch);
 
        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
index 59e653b..6b95d3b 100644 (file)
@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                }
        }
 
+       /* If somehow no addresses were found that can be used with this
+        * scope, it's an error.
+        */
+       if (list_empty(&dest->address_list))
+               error = -ENETUNREACH;
+
 out:
        if (error)
                sctp_bind_addr_clean(dest);
index 3b55502..5c7ad30 100644 (file)
@@ -482,6 +482,12 @@ static int x25_listen(struct socket *sock, int backlog)
        int rc = -EOPNOTSUPP;
 
        lock_sock(sk);
+       if (sock->state != SS_UNCONNECTED) {
+               rc = -EINVAL;
+               release_sock(sk);
+               return rc;
+       }
+
        if (sk->sk_state != TCP_LISTEN) {
                memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
                sk->sk_max_ack_backlog = backlog;
index 29bf9c2..3010332 100644 (file)
@@ -142,17 +142,24 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
 macro_rules! print_macro (
     // The non-continuation cases (most of them, e.g. `INFO`).
     ($format_string:path, false, $($arg:tt)+) => (
-        // SAFETY: This hidden macro should only be called by the documented
-        // printing macros which ensure the format string is one of the fixed
-        // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
-        // by the `module!` proc macro or fixed values defined in a kernel
-        // crate.
-        unsafe {
-            $crate::print::call_printk(
-                &$format_string,
-                crate::__LOG_PREFIX,
-                format_args!($($arg)+),
-            );
+        // To remain sound, `arg`s must be expanded outside the `unsafe` block.
+        // Typically one would use a `let` binding for that; however, `format_args!`
+        // takes borrows on the arguments, but does not extend the scope of temporaries.
+        // Therefore, a `match` expression is used to keep them around, since
+        // the scrutinee is kept until the end of the `match`.
+        match format_args!($($arg)+) {
+            // SAFETY: This hidden macro should only be called by the documented
+            // printing macros which ensure the format string is one of the fixed
+            // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+            // by the `module!` proc macro or fixed values defined in a kernel
+            // crate.
+            args => unsafe {
+                $crate::print::call_printk(
+                    &$format_string,
+                    crate::__LOG_PREFIX,
+                    args,
+                );
+            }
         }
     );
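
The match-based rewrite relies on a Rust scoping rule: temporaries in a
`match` scrutinee live until the end of the whole `match`, whereas temporaries
in a `let` initializer are dropped at the end of the statement. A standalone
illustration, using plain `format_args!` and no kernel crates:

fn main() {
    // `let args = format_args!("{}", 40 + 2);` would not compile here:
    // the temporaries borrowed by `format_args!` are dropped at the end
    // of the `let` statement. The `match` keeps them alive for its body.
    match format_args!("{}", 40 + 2) {
        args => println!("{}", args),
    }
}
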
 
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index 5f195ee..5fd1424 100644 (file)
@@ -7,11 +7,6 @@ all:
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
-ifeq (x86,$(ARCH))
-TEST_GEN_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
-TEST_GEN_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
-endif
-
 TEST_PROGS := run.sh
 TEST_FILES := basic.sh tbench.sh gitsource.sh
 
index ea0978f..251794f 100644 (file)
@@ -241,7 +241,7 @@ int main(int argc, char **argv)
        while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
                switch (opt) {
                case 'p':
-                       reclaim_period_ms = atoi_non_negative("Reclaim period", optarg);
+                       reclaim_period_ms = atoi_positive("Reclaim period", optarg);
                        break;
                case 't':
                        token = atoi_paranoid(optarg);
index dae510c..13c75dc 100644 (file)
@@ -434,6 +434,7 @@ static void *juggle_shinfo_state(void *arg)
 int main(int argc, char *argv[])
 {
        struct timespec min_ts, max_ts, vm_ts;
+       struct kvm_xen_hvm_attr evt_reset;
        struct kvm_vm *vm;
        pthread_t thread;
        bool verbose;
@@ -962,10 +963,8 @@ int main(int argc, char *argv[])
        }
 
  done:
-       struct kvm_xen_hvm_attr evt_reset = {
-               .type = KVM_XEN_ATTR_TYPE_EVTCHN,
-               .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
-       };
+       evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+       evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
        vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
 
        alarm(0);
index 495ceab..9584eb5 100644 (file)
@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
        return -ENXIO;
 }
 
-static void kvm_vfio_destroy(struct kvm_device *dev)
+static void kvm_vfio_release(struct kvm_device *dev)
 {
        struct kvm_vfio *kv = dev->private;
        struct kvm_vfio_group *kvg, *tmp;
@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
        kvm_vfio_update_coherency(dev);
 
        kfree(kv);
-       kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+       kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
 }
 
 static int kvm_vfio_create(struct kvm_device *dev, u32 type);
@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
 static struct kvm_device_ops kvm_vfio_ops = {
        .name = "kvm-vfio",
        .create = kvm_vfio_create,
-       .destroy = kvm_vfio_destroy,
+       .release = kvm_vfio_release,
        .set_attr = kvm_vfio_set_attr,
        .has_attr = kvm_vfio_has_attr,
 };