Merge tag 'x86_urgent_for_v5.15_rc7' of git://git.kernel.org/pub/scm/linux/kernel...
author: Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Oct 2021 17:00:15 +0000 (07:00 -1000)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Oct 2021 17:00:15 +0000 (07:00 -1000)
Pull x86 fix from Borislav Petkov:
 "A single change adding Dave Hansen to our maintainers team"

* tag 'x86_urgent_for_v5.15_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Add Dave Hansen to the x86 maintainer team

210 files changed:
.mailmap
Documentation/networking/devlink/ice.rst
Documentation/networking/mctp.rst
arch/arm64/kvm/hyp/include/nvhe/gfp.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/page_alloc.c
arch/arm64/kvm/mmu.c
arch/nios2/include/asm/irqflags.h
arch/nios2/include/asm/registers.h
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/smp.c
arch/s390/kvm/gaccess.c
arch/s390/kvm/intercept.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
block/blk-cgroup.c
block/partitions/core.c
drivers/acpi/power.c
drivers/acpi/tables.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/kmb/kmb_crtc.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_dsi.c
drivers/gpu/drm/kmb/kmb_dsi.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/kmb/kmb_plane.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
drivers/hv/hyperv_vmbus.h
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/net/can/m_can/m_can_platform.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_devids.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_sched.h
drivers/net/ethernet/intel/igc/igc_hw.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/mscc/ocelot_vsc7514.c
drivers/net/ethernet/netronome/nfp/nfp_asm.c
drivers/net/ethernet/sfc/mcdi_port_common.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/hamradio/baycom_epp.c
drivers/net/usb/Kconfig
drivers/net/usb/usbnet.c
drivers/net/vrf.c
drivers/nfc/st95hf/core.c
drivers/of/of_reserved_mem.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_kvm_x86.c
drivers/scsi/hosts.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd-pci.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/ceph/super.h
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/virtio_fs.c
fs/io-wq.c
fs/io_uring.c
fs/kernel_read_file.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/ksmbd_netlink.h
fs/ksmbd/mgmt/user_config.c
fs/ksmbd/mgmt/user_config.h
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/transport_ipc.c
fs/ksmbd/transport_ipc.h
fs/ksmbd/transport_rdma.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs.h
fs/ocfs2/alloc.c
fs/ocfs2/super.c
fs/userfaultfd.c
include/acpi/platform/acgcc.h
include/linux/cpuhotplug.h
include/linux/elfcore.h
include/linux/memory.h
include/linux/mlx5/driver.h
include/linux/secretmem.h
include/linux/trace_recursion.h
include/linux/user_namespace.h
include/net/mctp.h
include/net/sctp/sm.h
include/net/tcp.h
include/uapi/linux/mctp.h
kernel/auditsc.c
kernel/cred.c
kernel/dma/debug.c
kernel/dma/debug.h
kernel/dma/mapping.c
kernel/signal.c
kernel/trace/ftrace.c
kernel/ucount.c
mm/huge_memory.c
mm/memblock.c
mm/mempolicy.c
mm/migrate.c
mm/page_ext.c
mm/slab.c
mm/slub.c
net/bridge/br_private.h
net/bridge/netfilter/ebtables.c
net/can/isotp.c
net/can/j1939/j1939-priv.h
net/can/j1939/main.c
net/can/j1939/transport.c
net/dsa/dsa2.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6t_rt.c
net/ipv6/tcp_ipv6.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nft_chain_filter.c
net/netfilter/xt_IDLETIMER.c
net/sched/act_ct.c
security/keys/process_keys.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs4341.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/pcm179x-spi.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_xcvr.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/mixer.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/kvm/kvm_stat/kvm_stat
tools/testing/selftests/net/config
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/forwarding/Makefile
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/nettest.c
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/vm/userfaultfd.c
tools/testing/vsock/vsock_diag_test.c

index 6e84911..90e614d 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,8 @@ Al Viro <viro@zenIV.linux.org.uk>
 Andi Kleen <ak@linux.intel.com> <ak@suse.de>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrej Shadura <andrew.shadura@collabora.co.uk>
+Andrej Shadura <andrew@shadura.me> <andrew@beldisplaytech.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
index a432dc4..5d97cee 100644 (file)
@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
         PHY, link, etc.
     * - ``fw.mgmt.api``
       - running
-      - 1.5
-      - 2-digit version number of the API exported over the AdminQ by the
-        management firmware. Used by the driver to identify what commands
-        are supported.
+      - 1.5.1
+      - 3-digit version number (major.minor.patch) of the API exported over
+        the AdminQ by the management firmware. Used by the driver to
+        identify what commands are supported. Historical versions of the
+        kernel only displayed a 2-digit version number (major.minor).
     * - ``fw.mgmt.build``
       - running
       - 0x305d955f
index 6100cdc..fa7730d 100644 (file)
@@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
     };
 
     struct sockaddr_mctp {
-            unsigned short int smctp_family;
-            int                        smctp_network;
-            struct mctp_addr   smctp_addr;
-            __u8               smctp_type;
-            __u8               smctp_tag;
+            __kernel_sa_family_t smctp_family;
+            unsigned int         smctp_network;
+            struct mctp_addr     smctp_addr;
+            __u8                 smctp_type;
+            __u8                 smctp_tag;
     };
 
     #define MCTP_NET_ANY       0x0
index fb0f523..0a048dc 100644 (file)
@@ -24,6 +24,7 @@ struct hyp_pool {
 
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
 
index bacd493..34eeb52 100644 (file)
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-       return hyp_alloc_pages(&host_s2_pool, get_order(size));
+       void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+       hyp_split_page(hyp_virt_to_page(addr));
+
+       /*
+        * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+        * so there should be no need to free any of the tail pages to make the
+        * allocation exact.
+        */
+       WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+       return addr;
 }
 
 static void *host_s2_zalloc_page(void *pool)
index 41fc25b..0bd7701 100644 (file)
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 
 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+       BUG_ON(!p->refcount);
        p->refcount--;
        return (p->refcount == 0);
 }
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
        hyp_spin_unlock(&pool->lock);
 }
 
+void hyp_split_page(struct hyp_page *p)
+{
+       unsigned short order = p->order;
+       unsigned int i;
+
+       p->order = 0;
+       for (i = 1; i < (1 << order); i++) {
+               struct hyp_page *tail = p + i;
+
+               tail->order = 0;
+               hyp_set_page_refcounted(tail);
+       }
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
        unsigned short i = order;
index 1a94a7c..69bd173 100644 (file)
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                 * when updating the PG_mte_tagged page flag, see
                 * sanitise_mte_tags for more details.
                 */
-               if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-                       return -EINVAL;
+               if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                if (vma->vm_flags & VM_PFNMAP) {
                        /* IO region dirty page logging not allowed */
index b3ec3e5..25acf27 100644 (file)
@@ -9,7 +9,7 @@
 
 static inline unsigned long arch_local_save_flags(void)
 {
-       return RDCTL(CTL_STATUS);
+       return RDCTL(CTL_FSTATUS);
 }
 
 /*
@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       WRCTL(CTL_STATUS, flags);
+       WRCTL(CTL_FSTATUS, flags);
 }
 
 static inline void arch_local_irq_disable(void)
index 183c720..95b67dd 100644 (file)
@@ -11,7 +11,7 @@
 #endif
 
 /* control register numbers */
-#define CTL_STATUS     0
+#define CTL_FSTATUS    0
 #define CTL_ESTATUS    1
 #define CTL_BSTATUS    2
 #define CTL_IENABLE    3
index abb719b..3d97fb8 100644 (file)
@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
 /*
  * This is the sequence required to execute idle instructions, as
  * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
- *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
  */
 #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)                  \
        /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
-       std     r2,0(r1);                                       \
+       std     r2,-8(r1);                                      \
        ptesync;                                                \
-       ld      r2,0(r1);                                       \
+       ld      r2,-8(r1);                                      \
 236:   cmpd    cr0,r2,r2;                                      \
        bne     236b;                                           \
        IDLE_INST;                                              \
index 9cc7d3d..605bab4 100644 (file)
@@ -1730,8 +1730,6 @@ void __cpu_die(unsigned int cpu)
 
 void arch_cpu_idle_dead(void)
 {
-       sched_preempt_enable_no_resched();
-
        /*
         * Disable on the down path. This will be re-enabled by
         * start_secondary() via start_secondary_resume() below
index b9f85b2..6af59c5 100644 (file)
@@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 
 /**
  * guest_translate_address - translate guest logical into guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @gpa: Guest physical address
+ * @mode: Translation access mode
  *
  * Parameter semantics are the same as the ones from guest_translate.
  * The memory contents at the guest address are not changed.
@@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @length: Length of test range
+ * @mode: Translation access mode
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
                    unsigned long length, enum gacc_mode mode)
@@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 
 /**
  * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @vcpu: virtual cpu
  * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set
@@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  * @pgt: pointer to the beginning of the page table for the given address if
  *      successful (return value 0), or to the first invalid DAT entry in
  *      case of exceptions (return value > 0)
+ * @dat_protection: referenced memory is write protected
  * @fake: pgt references contiguous guest memory block, not a pgtable
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
index 72b25b7..2bd8f85 100644 (file)
@@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 
 /**
  * handle_external_interrupt - used for external interruption interceptions
+ * @vcpu: virtual cpu
  *
  * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
  * the new PSW does not have external interrupts disabled. In the first case,
@@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 }
 
 /**
- * Handle MOVE PAGE partial execution interception.
+ * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
+ * @vcpu: virtual cpu
  *
  * This interception can only happen for guests with DAT disabled and
  * addresses that are currently not mapped in the host. Thus we try to
index f8f48a7..5a0298a 100644 (file)
@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {
 
        struct kvm_pio_request pio;
        void *pio_data;
-       void *guest_ins_data;
+       void *sev_pio_data;
+       unsigned sev_pio_count;
 
        u8 event_exit_inst_len;
 
index 76fb009..d6ac32f 100644 (file)
@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       u64 msr_val;
        int i;
 
        if (!init_event) {
-               vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-                                      MSR_IA32_APICBASE_ENABLE;
+               msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
                if (kvm_vcpu_is_reset_bsp(vcpu))
-                       vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+                       msr_val |= MSR_IA32_APICBASE_BSP;
+               kvm_lapic_set_base(vcpu, msr_val);
        }
 
        if (!apic)
@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
 
-       if (!init_event) {
-               apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+       /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+       if (!init_event)
                kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
-       }
        kvm_apic_set_version(apic->vcpu);
 
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
                lapic_timer_advance_dynamic = false;
        }
 
+       /*
+        * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+        * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+        */
+       vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 void kvm_lapic_exit(void)
 {
        static_key_deferred_flush(&apic_hw_disabled);
+       WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
        static_key_deferred_flush(&apic_sw_disabled);
+       WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }
index 1a64ba5..0cc5890 100644 (file)
@@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
        unsigned bit;
        bool wp;
 
-       if (!is_cr4_pke(mmu)) {
-               mmu->pkru_mask = 0;
+       mmu->pkru_mask = 0;
+
+       if (!is_cr4_pke(mmu))
                return;
-       }
 
        wp = is_cr0_wp(mmu);
 
index c36b5fe..2e4916b 100644 (file)
@@ -618,7 +618,12 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
        vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
        vmsa.address = __sme_pa(svm->vmsa);
        vmsa.len = PAGE_SIZE;
-       return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+       if (ret)
+         return ret;
+
+       vcpu->arch.guest_state_protected = true;
+       return 0;
 }
 
 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -1479,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
                goto e_free_trans;
        }
 
+       /*
+        * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+        * encrypts the written data with the guest's key, and the cache may
+        * contain dirty, unencrypted data.
+        */
+       sev_clflush_pages(guest_page, n);
+
        /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
        data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
        data.guest_address |= sev_me_mask;
@@ -2583,7 +2595,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                return -EINVAL;
 
        return kvm_sev_es_string_io(&svm->vcpu, size, port,
-                                   svm->ghcb_sa, svm->ghcb_sa_len, in);
+                                   svm->ghcb_sa, svm->ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
index 128a54b..5d30db5 100644 (file)
@@ -191,7 +191,7 @@ struct vcpu_svm {
 
        /* SEV-ES scratch area support */
        void *ghcb_sa;
-       u64 ghcb_sa_len;
+       u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
 
index 116b089..7d595ef 100644 (file)
@@ -5562,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)
 
 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
 {
-       vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
-       vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
-       return 0;
+       /*
+        * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+        * VM-Exits. Unconditionally set the flag here and leave the handling to
+        * vmx_handle_exit().
+        */
+       to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+       return 1;
 }
 
 /*
@@ -6051,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
        int ret = __vmx_handle_exit(vcpu, exit_fastpath);
 
        /*
-        * Even when current exit reason is handled by KVM internally, we
-        * still need to exit to user space when bus lock detected to inform
-        * that there is a bus lock in guest.
+        * Exit to user space when bus lock detected to inform that there is
+        * a bus lock in guest.
         */
        if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
                if (ret > 0)
@@ -6302,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 
                /*
                 * If we are running L2 and L1 has a new pending interrupt
-                * which can be injected, we should re-evaluate
-                * what should be done with this new L1 interrupt.
-                * If L1 intercepts external-interrupts, we should
-                * exit from L2 to L1. Otherwise, interrupt should be
-                * delivered directly to L2.
+                * which can be injected, this may cause a vmexit or it may
+                * be injected into L2.  Either way, this interrupt will be
+                * processed via KVM_REQ_EVENT, not RVI, because we do not use
+                * virtual interrupt delivery to inject L1 interrupts into L2.
                 */
-               if (is_guest_mode(vcpu) && max_irr_updated) {
-                       if (nested_exit_on_intr(vcpu))
-                               kvm_vcpu_exiting_guest_mode(vcpu);
-                       else
-                               kvm_make_request(KVM_REQ_EVENT, vcpu);
-               }
+               if (is_guest_mode(vcpu) && max_irr_updated)
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else {
                max_irr = kvm_lapic_find_highest_irr(vcpu);
        }
index aabd3a2..b26647a 100644 (file)
@@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }
 
 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-                              unsigned short port, void *val,
+                              unsigned short port,
                               unsigned int count, bool in)
 {
        vcpu->arch.pio.port = port;
@@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
        vcpu->arch.pio.count  = count;
        vcpu->arch.pio.size = size;
 
-       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-               vcpu->arch.pio.count = 0;
+       if (!kernel_pio(vcpu, vcpu->arch.pio_data))
                return 1;
-       }
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
        return 0;
 }
 
-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
-                          unsigned short port, void *val, unsigned int count)
+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                            unsigned short port, unsigned int count)
 {
-       int ret;
+       WARN_ON(vcpu->arch.pio.count);
+       memset(vcpu->arch.pio_data, 0, size * count);
+       return emulator_pio_in_out(vcpu, size, port, count, true);
+}
 
-       if (vcpu->arch.pio.count)
-               goto data_avail;
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+{
+       int size = vcpu->arch.pio.size;
+       unsigned count = vcpu->arch.pio.count;
+       memcpy(val, vcpu->arch.pio_data, size * count);
+       trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+       vcpu->arch.pio.count = 0;
+}
 
-       memset(vcpu->arch.pio_data, 0, size * count);
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                          unsigned short port, void *val, unsigned int count)
+{
+       if (vcpu->arch.pio.count) {
+               /* Complete previous iteration.  */
+       } else {
+               int r = __emulator_pio_in(vcpu, size, port, count);
+               if (!r)
+                       return r;
 
-       ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-       if (ret) {
-data_avail:
-               memcpy(val, vcpu->arch.pio_data, size * count);
-               trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-               vcpu->arch.pio.count = 0;
-               return 1;
+               /* Results already available, fall through.  */
        }
 
-       return 0;
+       WARN_ON(count != vcpu->arch.pio.count);
+       complete_emulator_pio_in(vcpu, val);
+       return 1;
 }
 
 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port, const void *val,
                            unsigned int count)
 {
+       int ret;
+
        memcpy(vcpu->arch.pio_data, val, size * count);
        trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-       return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+       ret = emulator_pio_in_out(vcpu, size, port, count, false);
+       if (ret)
+                vcpu->arch.pio.count = 0;
+
+        return ret;
 }
 
 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
@@ -9643,14 +9660,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
-                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+               if (vcpu->arch.apicv_active)
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
+               if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
                        break;
                }
-
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
-        }
+       }
 
        /*
         * Do this here before restoring debug registers on the host.  And
@@ -11392,7 +11409,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
                int level = i + 1;
                int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
 
-               WARN_ON(slot->arch.rmap[i]);
+               if (slot->arch.rmap[i])
+                       continue;
 
                slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
                if (!slot->arch.rmap[i]) {
@@ -12367,44 +12385,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
 
-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+                          unsigned int port);
+
+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
 {
-       memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-              vcpu->arch.pio.count * vcpu->arch.pio.size);
-       vcpu->arch.pio.count = 0;
+       int size = vcpu->arch.pio.size;
+       int port = vcpu->arch.pio.port;
 
+       vcpu->arch.pio.count = 0;
+       if (vcpu->arch.sev_pio_count)
+               return kvm_sev_es_outs(vcpu, size, port);
        return 1;
 }
 
 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-                          unsigned int port, void *data,  unsigned int count)
+                          unsigned int port)
 {
-       int ret;
-
-       ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                       data, count);
-       if (ret)
-               return ret;
+       for (;;) {
+               unsigned int count =
+                       min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+               int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
+
+               /* memcpy done already by emulator_pio_out.  */
+               vcpu->arch.sev_pio_count -= count;
+               vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+               if (!ret)
+                       break;
 
-       vcpu->arch.pio.count = 0;
+               /* Emulation done by the kernel.  */
+               if (!vcpu->arch.sev_pio_count)
+                       return 1;
+       }
 
+       vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
        return 0;
 }
 
 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-                         unsigned int port, void *data, unsigned int count)
+                         unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
 {
-       int ret;
+       unsigned count = vcpu->arch.pio.count;
+       complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+       vcpu->arch.sev_pio_count -= count;
+       vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}
 
-       ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                      data, count);
-       if (ret) {
-               vcpu->arch.pio.count = 0;
-       } else {
-               vcpu->arch.guest_ins_data = data;
-               vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+       int size = vcpu->arch.pio.size;
+       int port = vcpu->arch.pio.port;
+
+       advance_sev_es_emulated_ins(vcpu);
+       if (vcpu->arch.sev_pio_count)
+               return kvm_sev_es_ins(vcpu, size, port);
+       return 1;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+                         unsigned int port)
+{
+       for (;;) {
+               unsigned int count =
+                       min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+               if (!__emulator_pio_in(vcpu, size, port, count))
+                       break;
+
+               /* Emulation done by the kernel.  */
+               advance_sev_es_emulated_ins(vcpu);
+               if (!vcpu->arch.sev_pio_count)
+                       return 1;
        }
 
+       vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
        return 0;
 }
 
@@ -12412,8 +12467,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data,  unsigned int count,
                         int in)
 {
-       return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-                 : kvm_sev_es_outs(vcpu, size, port, data, count);
+       vcpu->arch.sev_pio_data = data;
+       vcpu->arch.sev_pio_count = count;
+       return in ? kvm_sev_es_ins(vcpu, size, port)
+                 : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
index 38b9f76..9a1c583 100644 (file)
@@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
 {
        int rwd = blk_cgroup_io_type(bio), cpu;
        struct blkg_iostat_set *bis;
+       unsigned long flags;
 
        cpu = get_cpu();
        bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
-       u64_stats_update_begin(&bis->sync);
+       flags = u64_stats_update_begin_irqsave(&bis->sync);
 
        /*
         * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
@@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
        }
        bis->cur.ios[rwd]++;
 
-       u64_stats_update_end(&bis->sync);
+       u64_stats_update_end_irqrestore(&bis->sync, flags);
        if (cgroup_subsys_on_dfl(io_cgrp_subsys))
                cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
        put_cpu();
index 58c4c36..7bea19d 100644 (file)
@@ -423,6 +423,7 @@ out_del:
        device_del(pdev);
 out_put:
        put_device(pdev);
+       return ERR_PTR(err);
 out_put_disk:
        put_disk(disk);
        return ERR_PTR(err);
index b9863e2..f0ed441 100644 (file)
@@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
        list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
                mutex_lock(&resource->resource_lock);
 
-               /*
-                * Turn off power resources in an unknown state too, because the
-                * platform firmware on some system expects the OS to turn off
-                * power resources without any users unconditionally.
-                */
                if (!resource->ref_count &&
-                   resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+                   resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
                        acpi_handle_debug(resource->device.handle, "Turning OFF\n");
                        __acpi_power_off(resource);
                }
index f938373..71419eb 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/earlycpio.h>
 #include <linux/initrd.h>
 #include <linux/security.h>
+#include <linux/kmemleak.h>
 #include "internal.h"
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
         */
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
+       kmemleak_ignore_phys(acpi_tables_addr);
+
        /*
         * early_ioremap only can remap 256k one time. If we map all
         * tables one time, we will hit the limit. Need to map chunks
index 6bfaefa..1e30eae 100644 (file)
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
        return flags;
 }
 
-static enum drm_connector_status ast_connector_detect(struct drm_connector
-                                                  *connector, bool force)
-{
-       int r;
-
-       r = ast_get_modes(connector);
-       if (r <= 0)
-               return connector_status_disconnected;
-
-       return connector_status_connected;
-}
-
 static void ast_connector_destroy(struct drm_connector *connector)
 {
        struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
 
 static const struct drm_connector_funcs ast_connector_funcs = {
        .reset = drm_atomic_helper_connector_reset,
-       .detect = ast_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = ast_connector_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-                                               DRM_CONNECTOR_POLL_DISCONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        drm_connector_attach_encoder(connector, encoder);
 
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
 
        drm_mode_config_reset(dev);
 
-       drm_kms_helper_poll_init(dev);
-
        return 0;
 }
 
index 44327bc..06613ff 100644 (file)
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
        .disable_vblank = kmb_crtc_disable_vblank,
 };
 
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+                             struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
        unsigned int val = 0;
 
        /* Initialize mipi */
-       kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+       kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
        drm_info(dev,
                 "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
                 m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
        struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
 
        clk_prepare_enable(kmb->kmb_clk.clk_lcd);
-       kmb_crtc_set_mode(crtc);
+       kmb_crtc_set_mode(crtc, state);
        drm_crtc_vblank_on(crtc);
 }
 
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
        spin_unlock_irq(&crtc->dev->event_lock);
 }
 
+static enum drm_mode_status
+               kmb_crtc_mode_valid(struct drm_crtc *crtc,
+                                   const struct drm_display_mode *mode)
+{
+       int refresh;
+       struct drm_device *dev = crtc->dev;
+       int vfp = mode->vsync_start - mode->vdisplay;
+
+       if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+               drm_dbg(dev, "height = %d less than %d",
+                       mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+               return MODE_BAD_VVALUE;
+       }
+       if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+               drm_dbg(dev, "width = %d less than %d",
+                       mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+               return MODE_BAD_HVALUE;
+       }
+       refresh = drm_mode_vrefresh(mode);
+       if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+               drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+                       refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+               return MODE_BAD;
+       }
+
+       if (vfp < KMB_CRTC_MIN_VFP) {
+               drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+               return MODE_BAD;
+       }
+
+       return MODE_OK;
+}
+
 static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
        .atomic_begin = kmb_crtc_atomic_begin,
        .atomic_enable = kmb_crtc_atomic_enable,
        .atomic_disable = kmb_crtc_atomic_disable,
        .atomic_flush = kmb_crtc_atomic_flush,
+       .mode_valid = kmb_crtc_mode_valid,
 };
 
 int kmb_setup_crtc(struct drm_device *drm)
index 12ce669..961ac6f 100644 (file)
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
                if (val & LAYER3_DMA_FIFO_UNDERFLOW)
                        drm_dbg(&kmb->drm,
                                "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
-               if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+               if (val & LAYER3_DMA_FIFO_OVERFLOW)
                        drm_dbg(&kmb->drm,
                                "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
        }
index 69a62e2..bf085e9 100644 (file)
 #define DRIVER_MAJOR                   1
 #define DRIVER_MINOR                   1
 
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP               4
+#define KMB_CRTC_MAX_WIDTH             1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT            1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH             1920
+#define KMB_CRTC_MIN_HEIGHT            1080
 #define KMB_FB_MAX_WIDTH               1920
 #define KMB_FB_MAX_HEIGHT              1080
 #define KMB_FB_MIN_WIDTH               1
 #define KMB_FB_MIN_HEIGHT              1
-
+#define KMB_MIN_VREFRESH               59    /*vertical refresh in Hz */
+#define KMB_MAX_VREFRESH               60    /*vertical refresh in Hz */
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
@@ -50,6 +57,7 @@ struct kmb_drm_private {
        spinlock_t                      irq_lock;
        int                             irq_lcd;
        int                             sys_clk_mhz;
+       struct disp_cfg                 init_disp_cfg[KMB_MAX_PLANES];
        struct layer_status             plane_status[KMB_MAX_PLANES];
        int                             kmb_under_flow;
        int                             kmb_flush_done;
index 1793cd3..f607188 100644 (file)
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
        return 0;
 }
 
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500  500
+
 static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
                                struct mipi_tx_frame_timing_cfg *fg_cfg)
 {
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
        /* 500 Mhz system clock minus 50 to account for the difference in
         * MIPI clock speed in RTL tests
         */
-       sysclk = kmb_dsi->sys_clk_mhz - 50;
+       if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+               sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+       } else {
+               /* 700 Mhz clk*/
+               sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+       }
 
        /* PPL-Pixel Packing Layer, LLP-Low Level Protocol
         * Frame genartor timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
        return 0;
 }
 
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+                               struct drm_atomic_state *old_state)
 {
        struct regmap *msscam;
 
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
                dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
                return;
        }
-
+       drm_atomic_bridge_chain_enable(adv_bridge, old_state);
        /* DISABLE MIPI->CIF CONNECTION */
        regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
 
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 }
 
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-                    int sys_clk_mhz)
+                    int sys_clk_mhz, struct drm_atomic_state *old_state)
 {
        u64 data_rate;
 
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
                mipi_tx_init_cfg.lane_rate_mbps = data_rate;
        }
 
-       kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
-       kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
        /* Initialize mipi controller */
        mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
 
        /* Dphy initialization */
        mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
 
-       connect_lcd_to_mipi(kmb_dsi);
+       connect_lcd_to_mipi(kmb_dsi, old_state);
        dev_info(kmb_dsi->dev, "mipi hw initialized");
 
        return 0;
index 66b7c50..09dc887 100644 (file)
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
 struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
 void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-                    int sys_clk_mhz);
+                    int sys_clk_mhz, struct drm_atomic_state *old_state);
 int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
index 06b0c42..00404ba 100644 (file)
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
 
 static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
 {
+       struct kmb_drm_private *kmb;
+       struct kmb_plane *kmb_plane = to_kmb_plane(plane);
        int i;
+       int plane_id = kmb_plane->id;
+       struct disp_cfg init_disp_cfg;
 
+       kmb = to_kmb(plane->dev);
+       init_disp_cfg = kmb->init_disp_cfg[plane_id];
+       /* Due to HW limitations, changing pixel format after initial
+        * plane configuration is not supported.
+        */
+       if (init_disp_cfg.format && init_disp_cfg.format != format) {
+               drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+               return -EINVAL;
+       }
        for (i = 0; i < plane->format_count; i++) {
                if (plane->format_types[i] == format)
                        return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 {
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
+       struct kmb_drm_private *kmb;
+       struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+       int plane_id = kmb_plane->id;
+       struct disp_cfg init_disp_cfg;
        struct drm_framebuffer *fb;
        int ret;
        struct drm_crtc_state *crtc_state;
        bool can_position;
 
+       kmb = to_kmb(plane->dev);
+       init_disp_cfg = kmb->init_disp_cfg[plane_id];
        fb = new_plane_state->fb;
        if (!fb || !new_plane_state->crtc)
                return 0;
@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
            new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
            new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
                return -EINVAL;
+
+       /* Due to HW limitations, changing plane height or width after
+        * initial plane configuration is not supported.
+        */
+       if ((init_disp_cfg.width && init_disp_cfg.height) &&
+           (init_disp_cfg.width != fb->width ||
+           init_disp_cfg.height != fb->height)) {
+               drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+               return -EINVAL;
+       }
        can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
        crtc_state =
                drm_atomic_get_existing_crtc_state(state,
@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        unsigned char plane_id;
        int num_planes;
        static dma_addr_t addr[MAX_SUB_PLANES];
+       struct disp_cfg *init_disp_cfg;
 
        if (!plane || !new_plane_state || !old_plane_state)
                return;
@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        }
        spin_unlock_irq(&kmb->irq_lock);
 
-       src_w = (new_plane_state->src_w >> 16);
+       init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+       src_w = new_plane_state->src_w >> 16;
        src_h = new_plane_state->src_h >> 16;
        crtc_x = new_plane_state->crtc_x;
        crtc_y = new_plane_state->crtc_y;
@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
        /* Enable DMA */
        kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+       /* Save initial display config */
+       if (!init_disp_cfg->width ||
+           !init_disp_cfg->height ||
+           !init_disp_cfg->format) {
+               init_disp_cfg->width = width;
+               init_disp_cfg->height = height;
+               init_disp_cfg->format = fb->format->format;
+       }
+
        drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
                kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
 
index 6e8d22c..b511440 100644 (file)
@@ -63,6 +63,12 @@ struct layer_status {
        u32 ctrl;
 };
 
+struct disp_cfg {
+       unsigned int width;
+       unsigned int height;
+       unsigned int format;
+};
+
 struct kmb_plane *kmb_plane_init(struct drm_device *drm);
 void kmb_plane_destroy(struct drm_plane *plane);
 #endif /* __KMB_PLANE_H__ */
index 33da25b..267a880 100644 (file)
@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
                        adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
                adreno_gpu->base.hw_apriv = true;
 
+       /*
+        * For now only clamp to idle freq for devices where this is known not
+        * to cause power supply issues:
+        */
+       if (info && (info->revn == 618))
+               gpu->clamp_to_idle = true;
+
        a6xx_llc_slices_init(pdev, a6xx_gpu);
 
        ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
index 030f82f..ee25d55 100644 (file)
@@ -203,6 +203,10 @@ struct msm_gpu {
        uint32_t suspend_count;
 
        struct msm_gpu_state *crashstate;
+
+       /* Enable clamping to idle freq when inactive: */
+       bool clamp_to_idle;
+
        /* True if the hardware supports expanded apriv (a650 and newer) */
        bool hw_apriv;
 
index 84e98c0..20006d0 100644 (file)
@@ -200,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 
        idle_freq = get_freq(gpu);
 
-       msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+       if (gpu->clamp_to_idle)
+               msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
 
        df->idle_time = ktime_get();
        df->idle_freq = idle_freq;
index ec0432f..86d7863 100644 (file)
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
        struct mxsfb_drm_private *mxsfb = drm->dev_private;
 
        mxsfb_enable_axi_clk(mxsfb);
-       mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+       /* Disable and clear VBLANK IRQ */
+       writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+       writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
        mxsfb_disable_axi_clk(mxsfb);
 }
 
index 0145129..534dd74 100644 (file)
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
        .clock          = 69700,
 
        .hdisplay       = 800,
-       .hsync_start    = 800 + 6,
-       .hsync_end      = 800 + 6 + 15,
-       .htotal         = 800 + 6 + 15 + 16,
+       .hsync_start    = 800 + 52,
+       .hsync_end      = 800 + 52 + 8,
+       .htotal         = 800 + 52 + 8 + 48,
 
        .vdisplay       = 1280,
-       .vsync_start    = 1280 + 8,
-       .vsync_end      = 1280 + 8 + 48,
-       .vtotal         = 1280 + 8 + 48 + 52,
+       .vsync_start    = 1280 + 16,
+       .vsync_end      = 1280 + 16 + 6,
+       .vtotal         = 1280 + 16 + 6 + 15,
 
        .width_mm       = 135,
        .height_mm      = 217,
index 42f3d9d..d030577 100644 (file)
@@ -13,6 +13,7 @@
 #define _HYPERV_VMBUS_H
 
 #include <linux/list.h>
+#include <linux/bitops.h>
 #include <asm/sync_bitops.h>
 #include <asm/hyperv-tlfs.h>
 #include <linux/atomic.h>
index e501cb0..bd087cc 100644 (file)
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
        pci_set_master(hc->pdev);
        if (!hc->irq) {
                printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
-               return 1;
+               return -EINVAL;
        }
        hc->hw.pci_io =
                (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
 
        if (!hc->hw.pci_io) {
                printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
-               return 1;
+               return -ENOMEM;
        }
        /* Allocate memory for FIFOS */
        /* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
        if (!buffer) {
                printk(KERN_WARNING
                       "HFC-PCI: Error allocating memory for FIFO!\n");
-               return 1;
+               return -ENOMEM;
        }
        hc->hw.fifos = buffer;
        pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
                       "HFC-PCI: Error in ioremap for PCI!\n");
                dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
                                  hc->hw.dmahandle);
-               return 1;
+               return -ENOMEM;
        }
 
        printk(KERN_INFO
index 308d4f2..eee47ba 100644 (file)
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
        struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+       void __iomem *src = priv->mram_base + offset;
 
-       ioread32_rep(priv->mram_base + offset, val, val_count);
+       while (val_count--) {
+               *(unsigned int *)val = ioread32(src);
+               val += 4;
+               src += 4;
+       }
 
        return 0;
 }
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
                            const void *val, size_t val_count)
 {
        struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+       void __iomem *dst = priv->mram_base + offset;
 
-       iowrite32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               iowrite32(*(unsigned int *)val, dst);
+               val += 4;
+               dst += 4;
+       }
 
        return 0;
 }
index 00e4533..8999ec9 100644 (file)
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
        struct rcar_can_priv *priv = netdev_priv(ndev);
        u16 ctlr;
 
-       if (netif_running(ndev)) {
-               netif_stop_queue(ndev);
-               netif_device_detach(ndev);
-       }
+       if (!netif_running(ndev))
+               return 0;
+
+       netif_stop_queue(ndev);
+       netif_device_detach(ndev);
+
        ctlr = readw(&priv->regs->ctlr);
        ctlr |= RCAR_CAN_CTLR_CANM_HALT;
        writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
        u16 ctlr;
        int err;
 
+       if (!netif_running(ndev))
+               return 0;
+
        err = clk_enable(priv->clk);
        if (err) {
                netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
        writew(ctlr, &priv->regs->ctlr);
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       if (netif_running(ndev)) {
-               netif_device_attach(ndev);
-               netif_start_queue(ndev);
-       }
+       netif_device_attach(ndev);
+       netif_start_queue(ndev);
+
        return 0;
 }
 
index 6db90dc..84f3402 100644 (file)
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
                struct net_device *prev_dev = chan->prev_dev;
 
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
+               /* do that only for first channel */
+               if (!prev_dev && chan->pciec_card)
+                       peak_pciec_remove(chan->pciec_card);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
                dev = prev_dev;
 
-               if (!dev) {
-                       /* do that only for first channel */
-                       if (chan->pciec_card)
-                               peak_pciec_remove(chan->pciec_card);
+               if (!dev)
                        break;
-               }
                priv = netdev_priv(dev);
                chan = priv->priv;
        }
index b11eaba..09029a3 100644 (file)
@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
        } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
                new_state = CAN_STATE_ERROR_WARNING;
        } else {
-               /* no error bit (so, no error skb, back to active state) */
-               dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               /* back to (or still in) ERROR_ACTIVE state */
+               new_state = CAN_STATE_ERROR_ACTIVE;
                pdev->bec.txerr = 0;
                pdev->bec.rxerr = 0;
-               return 0;
        }
 
        /* state hasn't changed */
@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 
        /* allocate an skb to store the error frame */
        skb = alloc_can_err_skb(netdev, &cf);
-       if (skb)
-               can_change_state(netdev, cf, tx_state, rx_state);
+       can_change_state(netdev, cf, tx_state, rx_state);
 
        /* things must be done even in case of OOM */
        if (new_state == CAN_STATE_BUS_OFF)
index 3ff4b7e..dbd4486 100644 (file)
 #define GSWIP_SDMA_PCTRLp(p)           (0xBC0 + ((p) * 0x6))
 #define  GSWIP_SDMA_PCTRL_EN           BIT(0)  /* SDMA Port Enable */
 #define  GSWIP_SDMA_PCTRL_FCEN         BIT(1)  /* Flow Control Enable */
-#define  GSWIP_SDMA_PCTRL_PAUFWD       BIT(1)  /* Pause Frame Forwarding */
+#define  GSWIP_SDMA_PCTRL_PAUFWD       BIT(3)  /* Pause Frame Forwarding */
 
 #define GSWIP_TABLE_ACTIVE_VLAN                0x01
 #define GSWIP_TABLE_VLAN_MAPPING       0x02
index 094737e..9890672 100644 (file)
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
 {
        struct mt7530_priv *priv = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
-               return 0;
-
        mutex_lock(&priv->reg_mutex);
 
        /* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
 {
        struct mt7530_priv *priv = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
-               return;
-
        mutex_lock(&priv->reg_mutex);
 
        /* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->dev = &mdiodev->dev;
-       priv->ds->num_ports = DSA_MAX_PORTS;
+       priv->ds->num_ports = MT7530_NUM_PORTS;
 
        /* Use medatek,mcm property to distinguish hardware type that would
         * casues a little bit differences on power-on sequence.
index 691e147..0fbecd0 100644 (file)
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
                dev_err(&nic->pdev->dev,
                        "Request for #%d msix vectors failed, returned %d\n",
                           nic->num_vec, ret);
-               return 1;
+               return ret;
        }
 
        /* Register mailbox interrupt handler */
index d1667b7..a27227a 100644 (file)
@@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
        if (ret < 0) {
                netdev_err(nic->netdev,
                           "Req for #%d msix vectors failed\n", nic->num_vec);
-               return 1;
+               return ret;
        }
 
        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
        if (!nicvf_check_pf_ready(nic)) {
                nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
                nicvf_unregister_interrupts(nic);
-               return 1;
+               return -EIO;
        }
 
        return 0;
index 9690e36..910b9f7 100644 (file)
@@ -157,7 +157,7 @@ static const struct {
        { ENETC_PM0_TFRM,   "MAC tx frames" },
        { ENETC_PM0_TFCS,   "MAC tx fcs errors" },
        { ENETC_PM0_TVLAN,  "MAC tx VLAN frames" },
-       { ENETC_PM0_TERR,   "MAC tx frames" },
+       { ENETC_PM0_TERR,   "MAC tx frame errors" },
        { ENETC_PM0_TUCA,   "MAC tx unicast frames" },
        { ENETC_PM0_TMCA,   "MAC tx multicast frames" },
        { ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
index 4c977df..d522bd5 100644 (file)
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
 
 static void enetc_configure_port_mac(struct enetc_hw *hw)
 {
+       int tc;
+
        enetc_port_wr(hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
 
-       enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+       for (tc = 0; tc < 8; tc++)
+               enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
 
        enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
                      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
index eef1b27..67b0bf3 100644 (file)
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+       const struct pci_device_id *pci_id;
+       struct hnae3_ae_dev *ae_dev;
+
+       if (!ae_algo)
+               return;
+
+       list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+               if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+                       continue;
+
+               pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+               if (!pci_id)
+                       continue;
+               if (IS_ENABLED(CONFIG_PCI_IOV))
+                       pci_disable_sriov(ae_dev->pdev);
+       }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
 /* we are keeping things simple and using single lock for all the
  * list. This is a non-critical code so other updations, if happen
  * in parallel, can wait.
index 8ba21d6..d701451 100644 (file)
@@ -853,6 +853,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
 
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
 
index 468b8f0..4b886a1 100644 (file)
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
 
 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
                              struct sk_buff *skb,
-                             u8 max_non_tso_bd_num,
                              unsigned int bd_num)
 {
        /* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
         * will not help.
         */
        if (skb->len > HNS3_MAX_TSO_SIZE ||
-           (!skb_is_gso(skb) && skb->len >
-            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+           (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
                u64_stats_update_begin(&ring->syncp);
                ring->stats.hw_limitation++;
                u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
                        goto out;
                }
 
-               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-                                      bd_num))
+               if (hns3_skb_linearize(ring, skb, bd_num))
                        return -ENOMEM;
 
                bd_num = hns3_tx_bd_count(skb->len);
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc[i].addr = 0;
+       ring->desc_cb[i].refill = 0;
 }
 
 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
+       ring->desc_cb[i].refill = 1;
 
        return 0;
 }
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc_cb[i] = *res_cb;
+       ring->desc_cb[i].refill = 1;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
        ring->desc_cb[i].reuse_flag = 0;
+       ring->desc_cb[i].refill = 1;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
 
+       if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+               return ring->desc_num;
+
        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
                                      int cleand_count)
 {
        struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
                                hns3_rl_err(ring_to_netdev(ring),
                                            "alloc rx buffer failed: %d\n",
                                            ret);
-                               break;
+
+                               writel(i, ring->tqp->io_base +
+                                      HNS3_RING_RX_RING_HEAD_REG);
+                               return true;
                        }
                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
        }
 
        writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+       return false;
 }
 
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
        ring->desc[ring->next_to_clean].rx.bd_base_info &=
                cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+       ring->desc_cb[ring->next_to_clean].refill = 0;
        ring->next_to_clean += 1;
 
        if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
        int unused_count = hns3_desc_unused(ring);
+       bool failure = false;
        int recv_pkts = 0;
        int err;
 
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
        while (recv_pkts < budget) {
                /* Reuse or realloc buffers */
                if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-                       hns3_nic_alloc_rx_buffers(ring, unused_count);
-                       unused_count = hns3_desc_unused(ring) -
-                                       ring->pending_buf;
+                       failure = failure ||
+                               hns3_nic_alloc_rx_buffers(ring, unused_count);
+                       unused_count = 0;
                }
 
                /* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
        }
 
 out:
-       /* Make all data has been write before submit */
-       if (unused_count > 0)
-               hns3_nic_alloc_rx_buffers(ring, unused_count);
-
-       return recv_pkts;
+       return failure ? budget : recv_pkts;
 }
 
 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
index 6162d9f..f09a61d 100644 (file)
@@ -186,11 +186,9 @@ enum hns3_nic_state {
 
 #define HNS3_MAX_BD_SIZE                       65535
 #define HNS3_MAX_TSO_BD_NUM                    63U
-#define HNS3_MAX_TSO_SIZE \
-       (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE                      1048576U
+#define HNS3_MAX_NON_TSO_SIZE                  9728U
 
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-       (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
 
 #define HNS3_VECTOR_GL0_OFFSET                 0x100
 #define HNS3_VECTOR_GL1_OFFSET                 0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
        u32 length;     /* length of the buffer */
 
        u16 reuse_flag;
+       u16 refill;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
index 307c9e8..91cb578 100644 (file)
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
                                *changed = true;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
+                       /* The hardware will switch to sp mode if bandwidth is
+                        * 0, so limit ets bandwidth must be greater than 0.
+                        */
+                       if (!ets->tc_tx_bw[i]) {
+                               dev_err(&hdev->pdev->dev,
+                                       "tc%u ets bw cannot be 0\n", i);
+                               return -EINVAL;
+                       }
+
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_DWRR)
                                *changed = true;
index bb9b026..93aa7f2 100644 (file)
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
 
        /* configure TM QCN hw errors */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
-       if (en)
+       desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+       if (en) {
+               desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
                desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+       }
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
index 07987fb..d811eee 100644 (file)
@@ -50,6 +50,8 @@
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN  0x003F
 #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK     0x003F
 #define HCLGE_TM_SCH_ECC_ERR_INT_EN    0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE      0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN       0xFFFF00
 #define HCLGE_TM_QCN_MEM_ERR_INT_EN    0xFFFFFF
 #define HCLGE_NCSI_ERR_INT_EN  0x3
 #define HCLGE_NCSI_ERR_INT_TYPE        0x9
index f5b8d1f..dcd40cc 100644 (file)
@@ -13065,6 +13065,7 @@ static int hclge_init(void)
 
 static void hclge_exit(void)
 {
+       hnae3_unregister_ae_algo_prepare(&ae_algo);
        hnae3_unregister_ae_algo(&ae_algo);
        destroy_workqueue(hclge_wq);
 }
index f314dbd..95074e9 100644 (file)
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+               for (; k < HNAE3_MAX_TC; k++)
+                       hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
        }
 }
 
index 5fdac86..bef6b98 100644 (file)
@@ -2273,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
                hdev->reset_attempts = 0;
 
                hdev->last_reset_time = jiffies;
-               while ((hdev->reset_type =
-                       hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-                      != HNAE3_NONE_RESET)
+               hdev->reset_type =
+                       hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+               if (hdev->reset_type != HNAE3_NONE_RESET)
                        hclgevf_reset(hdev);
        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
                                      &hdev->reset_state)) {
index 5b2143f..3178efd 100644 (file)
@@ -113,7 +113,8 @@ enum e1000_boards {
        board_pch2lan,
        board_pch_lpt,
        board_pch_spt,
-       board_pch_cnp
+       board_pch_cnp,
+       board_pch_tgp
 };
 
 struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
index 60c582a..5e4fc9b 100644 (file)
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
-       u32 ctrl_ext, txdctl, snoop;
+       u32 ctrl_ext, txdctl, snoop, fflt_dbg;
        s32 ret_val;
        u16 i;
 
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
                snoop = (u32)~(PCIE_NO_SNOOP_ALL);
        e1000e_set_pcie_no_snoop(hw, snoop);
 
+       /* Enable workaround for packet loss issue on TGP PCH
+        * Do not gate DMA clock from the modPHY block
+        */
+       if (mac->type >= e1000_pch_tgp) {
+               fflt_dbg = er32(FFLT_DBG);
+               fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+               ew32(FFLT_DBG, fflt_dbg);
+       }
+
        ctrl_ext = er32(CTRL_EXT);
        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
        ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
        .phy_ops                = &ich8_phy_ops,
        .nvm_ops                = &spt_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_tgp_info = {
+       .mac                    = e1000_pch_tgp,
+       .flags                  = FLAG_IS_ICH
+                                 | FLAG_HAS_WOL
+                                 | FLAG_HAS_HW_TIMESTAMP
+                                 | FLAG_HAS_CTRLEXT_ON_LOAD
+                                 | FLAG_HAS_AMT
+                                 | FLAG_HAS_FLASH
+                                 | FLAG_HAS_JUMBO_FRAMES
+                                 | FLAG_APME_IN_WUC,
+       .flags2                 = FLAG2_HAS_PHY_STATS
+                                 | FLAG2_HAS_EEE,
+       .pba                    = 26,
+       .max_hw_frame_size      = 9022,
+       .get_variants           = e1000_get_variants_ich8lan,
+       .mac_ops                = &ich8_mac_ops,
+       .phy_ops                = &ich8_phy_ops,
+       .nvm_ops                = &spt_nvm_ops,
+};
index d6a092e..2504b11 100644 (file)
 /* Proprietary Latency Tolerance Reporting PCI Capability */
 #define E1000_PCI_LTR_CAP_LPT          0xA8
 
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK  0x1000
+
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
                                                  bool state);
index 900b3ab..ebcb2a3 100644 (file)
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
        [board_pch_lpt]         = &e1000_pch_lpt_info,
        [board_pch_spt]         = &e1000_pch_spt_info,
        [board_pch_cnp]         = &e1000_pch_cnp_info,
+       [board_pch_tgp]         = &e1000_pch_tgp_info,
 };
 
 struct e1000_reg_info {
@@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
index 2fb81e3..df5ad4d 100644 (file)
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
        case ICE_DEV_ID_E810C_BACKPLANE:
        case ICE_DEV_ID_E810C_QSFP:
        case ICE_DEV_ID_E810C_SFP:
+       case ICE_DEV_ID_E810_XXV_BACKPLANE:
+       case ICE_DEV_ID_E810_XXV_QSFP:
        case ICE_DEV_ID_E810_XXV_SFP:
                hw->mac_type = ICE_MAC_E810;
                break;
index 9d81946..ef4392e 100644 (file)
 #define ICE_DEV_ID_E810C_QSFP          0x1592
 /* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_E810C_SFP           0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE  0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP       0x159A
 /* Intel(R) Ethernet Controller E810-XXV for SFP */
 #define ICE_DEV_ID_E810_XXV_SFP                0x159B
 /* Intel(R) Ethernet Connection E823-C for backplane */
index 14afce8..da7288b 100644 (file)
@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
 {
        struct ice_hw *hw = &pf->hw;
 
-       snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+       snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+                hw->api_min_ver, hw->api_patch);
 
        return 0;
 }
index 06ac9ba..1ac96dc 100644 (file)
@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
        for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
                if (hw->tnl.tbl[i].valid &&
                    hw->tnl.tbl[i].type == type &&
-                   idx--)
+                   idx-- == 0)
                        return i;
 
        WARN_ON_ONCE(1);
@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
        u16 index;
 
        tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
-       index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+       index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
 
        status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
        if (status) {
index dde9802..b718e19 100644 (file)
@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
  */
 int ice_vsi_release(struct ice_vsi *vsi)
 {
+       enum ice_status err;
        struct ice_pf *pf;
 
        if (!vsi->back)
@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
 
        ice_fltr_remove_all(vsi);
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+       err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+       if (err)
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+                       vsi->vsi_num, err);
        ice_vsi_delete(vsi);
        ice_vsi_free_q_vectors(vsi);
 
@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
        prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+       ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+       if (ret)
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+                       vsi->vsi_num, ret);
        ice_vsi_free_q_vectors(vsi);
 
        /* SR-IOV determines needed MSIX resources all at once instead of per
index 0d6c143..06fa93e 100644 (file)
@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        if (!pf)
                return -ENOMEM;
 
+       /* initialize Auxiliary index to invalid value */
+       pf->aux_idx = -1;
+
        /* set up for high or low DMA */
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (err)
@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
 
        ice_aq_cancel_waiting_tasks(pf);
        ice_unplug_aux_dev(pf);
-       ida_free(&ice_aux_ida, pf->aux_idx);
+       if (pf->aux_idx >= 0)
+               ida_free(&ice_aux_ida, pf->aux_idx);
        set_bit(ICE_DOWN, pf->state);
 
        mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
index 9f07b66..2d9b102 100644 (file)
@@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
 }
 
 /**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+       return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
  * ice_get_agg_info - get the aggregator ID
  * @hw: pointer to the hardware structure
  * @agg_id: aggregator ID
index 9beef8f..fdf7a58 100644 (file)
@@ -89,6 +89,7 @@ enum ice_status
 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
                  u8 owner, bool enable);
 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
 
 /* Tx scheduler rate limiter functions */
 enum ice_status
index 4461f8b..4e02033 100644 (file)
@@ -22,8 +22,8 @@
 #define IGC_DEV_ID_I220_V                      0x15F7
 #define IGC_DEV_ID_I225_K                      0x3100
 #define IGC_DEV_ID_I225_K2                     0x3101
+#define IGC_DEV_ID_I226_K                      0x3102
 #define IGC_DEV_ID_I225_LMVP                   0x5502
-#define IGC_DEV_ID_I226_K                      0x5504
 #define IGC_DEV_ID_I225_IT                     0x0D9F
 #define IGC_DEV_ID_I226_LM                     0x125B
 #define IGC_DEV_ID_I226_V                      0x125C
index 41684a6..a88a1a4 100644 (file)
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);
index b4e9868..4a13ef5 100644 (file)
@@ -10,6 +10,8 @@
 #include "en_tc.h"
 #include "rep/tc.h"
 #include "rep/neigh.h"
+#include "lag.h"
+#include "lag_mp.h"
 
 struct mlx5e_tc_tun_route_attr {
        struct net_device *out_dev;
index 33de8f0..fb53973 100644 (file)
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
-        * SWP:      OutL3       InL4
-        *           InL3
+        * SWP:      OutL3       OutL4
         * Pkt: MAC  IP     ESP  L4
         *
         * Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                return;
 
        if (!xo->inner_ipproto) {
-               eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-               if (skb->protocol == htons(ETH_P_IPV6))
-                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-               if (xo->proto == IPPROTO_UDP)
+               switch (xo->proto) {
+               case IPPROTO_UDP:
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+                       fallthrough;
+               case IPPROTO_TCP:
+                       /* IP | ESP | TCP */
+                       eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+               switch (xo->inner_ipproto) {
+               case IPPROTO_UDP:
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-               return;
-       }
-
-       /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
-       switch (xo->inner_ipproto) {
-       case IPPROTO_UDP:
-               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-               fallthrough;
-       case IPPROTO_TCP:
-               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-               eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
-               if (skb->protocol == htons(ETH_P_IPV6))
-                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-               break;
-       default:
-               break;
+                       fallthrough;
+               case IPPROTO_TCP:
+                       eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+                       eseg->swp_inner_l4_offset =
+                               (skb->csum_start + skb->head - skb->data) / 2;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+                       break;
+               default:
+                       break;
+               }
        }
 
-       return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
index c06b4b9..d226cc5 100644 (file)
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
        struct mlx5e_flow_table *ft;
        int err;
 
-       priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
-       if (!priv->fs.vlan)
-               return -ENOMEM;
-
        ft = &priv->fs.vlan->ft;
        ft->num_groups = 0;
 
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
        ft_attr.prio = MLX5E_NIC_PRIO;
 
        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-       if (IS_ERR(ft->t)) {
-               err = PTR_ERR(ft->t);
-               goto err_free_t;
-       }
+       if (IS_ERR(ft->t))
+               return PTR_ERR(ft->t);
 
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
        kfree(ft->g);
 err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
-err_free_t:
-       kvfree(priv->fs.vlan);
-       priv->fs.vlan = NULL;
 
        return err;
 }
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
-       kvfree(priv->fs.vlan);
 }
 
 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
 }
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+       priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+       if (!priv->fs.vlan)
+               return -ENOMEM;
+       return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+       kvfree(priv->fs.vlan);
+       priv->fs.vlan = NULL;
+}
index 09c8b71..41ef6eb 100644 (file)
@@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
        mlx5e_timestamp_init(priv);
 
+       err = mlx5e_fs_init(priv);
+       if (err) {
+               mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+               return err;
+       }
+
        err = mlx5e_ipsec_init(priv);
        if (err)
                mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
        mlx5e_health_destroy_reporters(priv);
        mlx5e_tls_cleanup(priv);
        mlx5e_ipsec_cleanup(priv);
+       mlx5e_fs_cleanup(priv);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
index ba81647..129ff7e 100644 (file)
@@ -67,6 +67,8 @@
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
 #include <asm/div64.h>
+#include "lag.h"
+#include "lag_mp.h"
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
index c63d78e..188994d 100644 (file)
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5_wqe_eth_seg *eseg)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
+
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-       if (skb->encapsulation) {
-               eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+       if (xo->inner_ipproto) {
+               eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+       } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+               eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial_inner++;
-       } else {
-               sq->stats->csum_partial++;
        }
 }
 
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5e_accel_tx_state *accel,
                            struct mlx5_wqe_eth_seg *eseg)
 {
+       if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+               return;
+       }
+
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial++;
 #endif
-       } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
-               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
        } else
                sq->stats->csum_none++;
 }
index 985e305..c6cc67c 100644 (file)
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
 
 err_min_rate:
        list_del(&group->list);
-       err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                 group->tsar_ix);
-       if (err)
+       if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+                                               SCHEDULING_HIERARCHY_E_SWITCH,
+                                               group->tsar_ix))
                NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
 err_sched_elem:
        kfree(group);
index ca5690b..d2105c1 100644 (file)
@@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
        if (!mlx5_lag_is_ready(ldev)) {
                do_bond = false;
        } else {
+               /* VF LAG is in multipath mode, ignore bond change requests */
+               if (mlx5_lag_is_multipath(dev0))
+                       return;
+
                tracker = ldev->tracker;
 
                do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
index f239b35..21fdaf7 100644 (file)
@@ -9,20 +9,23 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+       return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
        if (!mlx5_lag_is_ready(ldev))
                return false;
 
+       if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+               return false;
+
        return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
                                         ldev->pf[MLX5_LAG_P2].dev);
 }
 
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
-       return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
 bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
index 729c839..dea199e 100644 (file)
@@ -24,12 +24,14 @@ struct lag_mp {
 void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
 int mlx5_lag_mp_init(struct mlx5_lag *ldev);
 void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
 static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
 static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
 static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
 
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_LAG_MP_H__ */
index cbece6e..5030dfc 100644 (file)
@@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
                        err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
                                            "port %u: missing serdes\n",
                                            portno);
+                       of_node_put(portnp);
                        goto cleanup_config;
                }
                config->portno = portno;
index 291ae68..d51f799 100644 (file)
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
                target = ocelot_regmap_init(ocelot, res);
                if (IS_ERR(target)) {
                        err = PTR_ERR(target);
+                       of_node_put(portnp);
                        goto out_teardown;
                }
 
index 2643ea5..154399c 100644 (file)
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
        }
 
        reg->dst_lmextn = swreg_lmextn(dst);
-       reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+       reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
 
        return 0;
 }
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
        }
 
        reg->dst_lmextn = swreg_lmextn(dst);
-       reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+       reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
 
        return 0;
 }
index 4bd3ef8..c4fe3c4 100644 (file)
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                SET_BIT(FIBRE);
-               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
                        SET_BIT(1000baseT_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-                       SET_BIT(10000baseT_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       SET_BIT(1000baseX_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+                       SET_BIT(10000baseCR_Full);
+                       SET_BIT(10000baseLR_Full);
+                       SET_BIT(10000baseSR_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
                        SET_BIT(40000baseCR4_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+                       SET_BIT(40000baseSR4_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
                        SET_BIT(100000baseCR4_Full);
-               if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+                       SET_BIT(100000baseSR4_Full);
+               }
+               if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
                        SET_BIT(25000baseCR_Full);
+                       SET_BIT(25000baseSR_Full);
+               }
                if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
                        SET_BIT(50000baseCR2_Full);
                break;
@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
                result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
        if (TEST_BIT(1000baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
-       if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+       if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+                       TEST_BIT(1000baseX_Full))
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
-       if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+       if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+                       TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+                       TEST_BIT(10000baseSR_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
-       if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+       if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+                       TEST_BIT(40000baseSR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
-       if (TEST_BIT(100000baseCR4_Full))
+       if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
-       if (TEST_BIT(25000baseCR_Full))
+       if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
                result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
        if (TEST_BIT(50000baseCR2_Full))
                result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
index a39c514..797e518 100644 (file)
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
        } else if (rc == -EINVAL) {
                fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
        } else if (rc == -EPERM) {
-               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+               pci_info(efx->pci_dev, "no PTP support\n");
                return rc;
        } else {
                efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
         * should only have been called during probe.
         */
        if (rc == -ENOSYS || rc == -EPERM)
-               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+               pci_info(efx->pci_dev, "no PTP support\n");
        else if (rc)
                efx_mcdi_display_error(efx, MC_CMD_PTP,
                                       MC_CMD_PTP_IN_DISABLE_LEN,
index 83dcfca..441e7f3 100644 (file)
@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
                return;
 
        if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
-               netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
+               pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
                return;
        }
        if (count > 0 && count > max_vfs)
index eb3b7bf..3d67d1f 100644 (file)
@@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-                       if (priv->synopsys_id != DWMAC_CORE_5_10)
+                       if (priv->synopsys_id < DWMAC_CORE_4_10)
                                ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
index 775dcf4..6b6f28d 100644 (file)
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
 
 /* --------------------------------------------------------------------- */
 
-#ifdef __i386__
+#if defined(__i386__) && !defined(CONFIG_UML)
 #include <asm/msr.h>
 #define GETTICK(x)                                             \
 ({                                                             \
        if (boot_cpu_has(X86_FEATURE_TSC))                      \
                x = (unsigned int)rdtsc();                      \
 })
-#else /* __i386__ */
+#else /* __i386__  && !CONFIG_UML */
 #define GETTICK(x)
-#endif /* __i386__ */
+#endif /* __i386__  && !CONFIG_UML */
 
 static void epp_bh(struct work_struct *work)
 {
index f87f175..b554054 100644 (file)
@@ -117,6 +117,7 @@ config USB_LAN78XX
        select PHYLIB
        select MICROCHIP_PHY
        select FIXED_PHY
+       select CRC32
        help
          This option adds support for Microchip LAN78XX based USB 2
          & USB 3 10/100/1000 Ethernet adapters.
index 840c1c2..80432ee 100644 (file)
@@ -1788,6 +1788,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        if (!dev->rx_urb_size)
                dev->rx_urb_size = dev->hard_mtu;
        dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+       if (dev->maxpacket == 0) {
+               /* that is a broken device */
+               goto out4;
+       }
 
        /* let userspace know we have a random address */
        if (ether_addr_equal(net->dev_addr, node_id))
index bf2fac9..662e261 100644 (file)
@@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
 
-       nf_reset_ct(skb);
-
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
         * For strict packets with a source LLA, determine the dst using the
@@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
 
-       nf_reset_ct(skb);
-
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
                goto out;
 
index d16cf3f..b23f479 100644 (file)
@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
                                 &reset_cmd,
                                 ST95HF_RESET_CMD_LEN,
                                 ASYNC);
-       if (result) {
+       if (result)
                dev_err(&spictx->spidev->dev,
                        "ST95HF reset failed in remove() err = %d\n", result);
-               return result;
-       }
 
        /* wait for 3 ms to complete the controller reset process */
        usleep_range(3000, 4000);
@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
        if (stcontext->st95hf_supply)
                regulator_disable(stcontext->st95hf_supply);
 
-       return result;
+       return 0;
 }
 
 /* Register as SPI protocol driver */
index 59c1390..9da8835 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <linux/kmemleak.h>
 
 #include "of_private.h"
 
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
                err = memblock_mark_nomap(base, size);
                if (err)
                        memblock_free(base, size);
+               kmemleak_ignore_phys(base);
        }
 
        return err;
index 4dfc52e..f9b2d66 100644 (file)
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
        struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
        ptp_cleanup_pin_groups(ptp);
+       kfree(ptp->vclock_index);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        /* Create a posix clock and link it to the device. */
        err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
+               if (ptp->pps_source)
+                       pps_unregister_source(ptp->pps_source);
+
+               if (ptp->kworker)
+                       kthread_destroy_worker(ptp->kworker);
+
+               put_device(&ptp->dev);
+
                pr_err("failed to create posix clock\n");
-               goto no_clock;
+               return ERR_PTR(err);
        }
 
        return ptp;
 
-no_clock:
-       if (ptp->pps_source)
-               pps_unregister_source(ptp->pps_source);
 no_pps:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);
 
-       kfree(ptp->vclock_index);
-
        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
index d0096cd..4991054 100644 (file)
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
 
        ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
                             KVM_CLOCK_PAIRING_WALLCLOCK);
-       if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+       if (ret == -KVM_ENOSYS)
                return -ENODEV;
 
-       return 0;
+       return ret;
 }
 
 int kvm_arch_ptp_get_clock(struct timespec64 *ts)
index 3f6f14f..24b72ee 100644 (file)
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
                goto fail;
        }
 
-       shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+       /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+       shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
                                   shost->can_queue);
 
        error = scsi_init_sense_cache(shost);
index 2197988..3cae880 100644 (file)
@@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->max_lun = -1;
        shost->unique_id = mrioc->id;
 
-       shost->max_channel = 1;
+       shost->max_channel = 0;
        shost->max_id = 0xFFFFFFFF;
 
        if (prot_mask >= 0)
index 4b5d28d..655cf5d 100644 (file)
@@ -431,7 +431,7 @@ done_unmap_sg:
        goto done_free_fcport;
 
 done_free_fcport:
-       if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+       if (bsg_request->msgcode != FC_BSG_RPT_ELS)
                qla2x00_free_fcport(fcport);
 done:
        return rval;
index d2e40aa..836fedc 100644 (file)
@@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                        ql_dbg_pci(ql_dbg_init, ha->pdev,
                                            0xe0ee, "%s: failed alloc dsd\n",
                                            __func__);
-                                       return 1;
+                                       return -ENOMEM;
                                }
                                ha->dif_bundle_kallocs++;
 
index b3478ed..7d8242c 100644 (file)
@@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
                        vha->flags.online, qla2x00_reset_active(vha),
                        cmd->reset_count, qpair->chip_reset);
-               spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-               return 0;
+               goto out_unmap_unlock;
        }
 
        /* Does F/W have an IOCBs for this request */
@@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        prm.sg = NULL;
        prm.req_cnt = 1;
 
-       /* Calculate number of entries and segments required */
-       if (qlt_pci_map_calc_cnt(&prm) != 0)
-               return -EAGAIN;
-
        if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                /*
@@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                return 0;
        }
 
+       /* Calculate number of entries and segments required */
+       if (qlt_pci_map_calc_cnt(&prm) != 0)
+               return -EAGAIN;
+
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        /* Does F/W have an IOCBs for this request */
        res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 
        BUG_ON(cmd->cmd_in_wq);
 
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(cmd->vha, cmd);
-
        if (!cmd->q_full)
                qlt_decr_num_pend_cmds(cmd->vha);
 
index b241f9e..291ecc3 100644 (file)
@@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-       module_put(sdev->host->hostt->module);
+       struct module *mod = sdev->host->hostt->module;
+
        put_device(&sdev->sdev_gendev);
+       module_put(mod);
 }
 EXPORT_SYMBOL(scsi_device_put);
 
index 8679325..a35841b 100644 (file)
@@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
        struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
        struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
        unsigned long flags;
+       struct module *mod;
 
        sdev = container_of(work, struct scsi_device, ew.work);
 
+       mod = sdev->host->hostt->module;
+
        scsi_dh_release_device(sdev);
 
        parent = sdev->sdev_gendev.parent;
@@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 
        if (parent)
                put_device(parent);
+       module_put(mod);
 }
 
 static void scsi_device_dev_release(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
+
+       /* Set module pointer as NULL in case of module unloading */
+       if (!try_module_get(sdp->host->hostt->module))
+               sdp->host->hostt->module = NULL;
+
        execute_in_process_context(scsi_device_dev_release_usercontext,
                                   &sdp->ew);
 }
index 922e4c7..78343d3 100644 (file)
@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                        session->recovery_tmo = value;
                break;
        default:
-               err = transport->set_param(conn, ev->u.set_param.param,
-                                          data, ev->u.set_param.len);
                if ((conn->state == ISCSI_CONN_BOUND) ||
                        (conn->state == ISCSI_CONN_UP)) {
                        err = transport->set_param(conn, ev->u.set_param.param,
index 523bf2f..fce6333 100644 (file)
@@ -3683,7 +3683,12 @@ static int sd_resume(struct device *dev)
 static int sd_resume_runtime(struct device *dev)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
-       struct scsi_device *sdp = sdkp->device;
+       struct scsi_device *sdp;
+
+       if (!sdkp)      /* E.g.: runtime resume at the start of sd_probe() */
+               return 0;
+
+       sdp = sdkp->device;
 
        if (sdp->ignore_media_change) {
                /* clear the device's sense data */
index ebbbc12..9eb1b88 100644 (file)
@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
        foreach_vmbus_pkt(desc, channel) {
                struct vstor_packet *packet = hv_pkt_data(desc);
                struct storvsc_cmd_request *request = NULL;
+               u32 pktlen = hv_pkt_datalen(desc);
                u64 rqst_id = desc->trans_id;
+               u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
+                       stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
 
-               if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
-                               stor_device->vmscsi_size_delta) {
-                       dev_err(&device->device, "Invalid packet len\n");
+               if (pktlen < minlen) {
+                       dev_err(&device->device,
+                               "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+                               rqst_id, pktlen, minlen);
                        continue;
                }
 
@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
                        if (rqst_id == 0) {
                                /*
                                 * storvsc_on_receive() looks at the vstor_packet in the message
-                                * from the ring buffer.  If the operation in the vstor_packet is
-                                * COMPLETE_IO, then we call storvsc_on_io_completion(), and
-                                * dereference the guest memory address.  Make sure we don't call
-                                * storvsc_on_io_completion() with a guest memory address that is
-                                * zero if Hyper-V were to construct and send such a bogus packet.
+                                * from the ring buffer.
+                                *
+                                * - If the operation in the vstor_packet is COMPLETE_IO, then
+                                *   we call storvsc_on_io_completion(), and dereference the
+                                *   guest memory address.  Make sure we don't call
+                                *   storvsc_on_io_completion() with a guest memory address
+                                *   that is zero if Hyper-V were to construct and send such
+                                *   a bogus packet.
+                                *
+                                * - If the operation in the vstor_packet is FCHBA_DATA, then
+                                *   we call cache_wwn(), and access the data payload area of
+                                *   the packet (wwn_packet); however, there is no guarantee
+                                *   that the packet is big enough to contain such area.
+                                *   Future-proof the code by rejecting such a bogus packet.
                                 */
-                               if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+                               if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+                                   packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
                                        dev_err(&device->device, "Invalid packet with ID of 0\n");
                                        continue;
                                }
index 149c1aa..5142455 100644 (file)
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
 
 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-       /*
-        * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
-        * address registers must be restored because the restore kernel can
-        * have used different addresses.
-        */
-       ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
-                     REG_UTP_TRANSFER_REQ_LIST_BASE_L);
-       ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
-                     REG_UTP_TRANSFER_REQ_LIST_BASE_H);
-       ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
-                     REG_UTP_TASK_REQ_LIST_BASE_L);
-       ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
-                     REG_UTP_TASK_REQ_LIST_BASE_H);
-
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);
 
@@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       /* Force a full reset and restore */
+       ufshcd_set_link_off(hba);
+
+       return ufshcd_system_resume(dev);
+}
+#endif
+
 /**
  * ufshcd_pci_shutdown - main function to put the controller in reset state
  * @pdev: pointer to PCI device handle
@@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 #ifdef CONFIG_PM_SLEEP
+       .suspend        = ufshcd_system_suspend,
+       .resume         = ufshcd_system_resume,
+       .freeze         = ufshcd_system_suspend,
+       .thaw           = ufshcd_system_resume,
+       .poweroff       = ufshcd_system_suspend,
+       .restore        = ufshcd_pci_restore,
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
 #endif
index 3e42d04..8f537f1 100644 (file)
@@ -2330,7 +2330,6 @@ retry:
 
 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 flush_tid;
@@ -2365,14 +2364,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        if (err < 0)
                ret = err;
 
-       if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
-               spin_lock(&file->f_lock);
-               err = errseq_check_and_advance(&ci->i_meta_err,
-                                              &fi->meta_err);
-               spin_unlock(&file->f_lock);
-               if (err < 0)
-                       ret = err;
-       }
+       err = file_check_and_advance_wb_err(file);
+       if (err < 0)
+               ret = err;
 out:
        dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
        return ret;
index d16fd2d..e61018d 100644 (file)
@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
-       fi->meta_err = errseq_sample(&ci->i_meta_err);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
 
        return 0;
index 2df1e12..1c75741 100644 (file)
@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        ceph_fscache_inode_init(ci);
 
-       ci->i_meta_err = 0;
-
        return &ci->vfs_inode;
 }
 
index 7cad180..d64413a 100644 (file)
@@ -1493,7 +1493,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
 {
        struct ceph_mds_request *req;
        struct rb_node *p;
-       struct ceph_inode_info *ci;
 
        dout("cleanup_session_requests mds%d\n", session->s_mds);
        mutex_lock(&mdsc->mutex);
@@ -1502,16 +1501,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
                                       struct ceph_mds_request, r_unsafe_item);
                pr_warn_ratelimited(" dropping unsafe request %llu\n",
                                    req->r_tid);
-               if (req->r_target_inode) {
-                       /* dropping unsafe change of inode's attributes */
-                       ci = ceph_inode(req->r_target_inode);
-                       errseq_set(&ci->i_meta_err, -EIO);
-               }
-               if (req->r_unsafe_dir) {
-                       /* dropping unsafe directory operation */
-                       ci = ceph_inode(req->r_unsafe_dir);
-                       errseq_set(&ci->i_meta_err, -EIO);
-               }
+               if (req->r_target_inode)
+                       mapping_set_error(req->r_target_inode->i_mapping, -EIO);
+               if (req->r_unsafe_dir)
+                       mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
                __unregister_request(mdsc, req);
        }
        /* zero r_attempts, so kick_requests() will re-send requests */
@@ -1678,7 +1671,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                spin_unlock(&mdsc->cap_dirty_lock);
 
                if (dirty_dropped) {
-                       errseq_set(&ci->i_meta_err, -EIO);
+                       mapping_set_error(inode->i_mapping, -EIO);
 
                        if (ci->i_wrbuffer_ref_head == 0 &&
                            ci->i_wr_ref == 0 &&
index 9b1b7f4..fd8742b 100644 (file)
@@ -1002,16 +1002,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
        struct ceph_fs_client *new = fc->s_fs_info;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
-       struct ceph_fs_client *other = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 
        dout("ceph_compare_super %p\n", sb);
 
-       if (compare_mount_options(fsopt, opt, other)) {
+       if (compare_mount_options(fsopt, opt, fsc)) {
                dout("monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
-           ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+           ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
                dout("fsid doesn't match\n");
                return 0;
        }
@@ -1019,6 +1019,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
                dout("flags differ\n");
                return 0;
        }
+
+       if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
+               dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+               return 0;
+       }
+
+       if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
+               dout("client has been forcibly unmounted\n");
+               return 0;
+       }
+
        return 1;
 }
 
index a40eb14..14f951c 100644 (file)
@@ -429,8 +429,6 @@ struct ceph_inode_info {
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
 #endif
-       errseq_t i_meta_err;
-
        struct inode vfs_inode; /* at end */
 };
 
@@ -774,7 +772,6 @@ struct ceph_file_info {
        spinlock_t rw_contexts_lock;
        struct list_head rw_contexts;
 
-       errseq_t meta_err;
        u32 filp_gen;
        atomic_t num_locks;
 };
index 319596d..f55f9f9 100644 (file)
@@ -1121,6 +1121,9 @@ int fuse_init_fs_context_submount(struct fs_context *fsc);
  */
 void fuse_conn_destroy(struct fuse_mount *fm);
 
+/* Drop the connection and free the fuse mount */
+void fuse_mount_destroy(struct fuse_mount *fm);
+
 /**
  * Add connection to control filesystem
  */
index 36cd031..12d49a1 100644 (file)
@@ -457,14 +457,6 @@ static void fuse_send_destroy(struct fuse_mount *fm)
        }
 }
 
-static void fuse_put_super(struct super_block *sb)
-{
-       struct fuse_mount *fm = get_fuse_mount_super(sb);
-
-       fuse_conn_put(fm->fc);
-       kfree(fm);
-}
-
 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
 {
        stbuf->f_type    = FUSE_SUPER_MAGIC;
@@ -1003,7 +995,6 @@ static const struct super_operations fuse_super_operations = {
        .evict_inode    = fuse_evict_inode,
        .write_inode    = fuse_write_inode,
        .drop_inode     = generic_delete_inode,
-       .put_super      = fuse_put_super,
        .umount_begin   = fuse_umount_begin,
        .statfs         = fuse_statfs,
        .sync_fs        = fuse_sync_fs,
@@ -1424,20 +1415,17 @@ static int fuse_get_tree_submount(struct fs_context *fsc)
        if (!fm)
                return -ENOMEM;
 
+       fm->fc = fuse_conn_get(fc);
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, NULL, set_anon_super_fc);
-       if (IS_ERR(sb)) {
-               kfree(fm);
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
+       if (IS_ERR(sb))
                return PTR_ERR(sb);
-       }
-       fm->fc = fuse_conn_get(fc);
 
        /* Initialize superblock, making @mp_fi its root */
        err = fuse_fill_super_submount(sb, mp_fi);
        if (err) {
-               fuse_conn_put(fc);
-               kfree(fm);
-               sb->s_fs_info = NULL;
                deactivate_locked_super(sb);
                return err;
        }
@@ -1569,8 +1557,6 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
 {
        struct fuse_fs_context *ctx = fsc->fs_private;
        int err;
-       struct fuse_conn *fc;
-       struct fuse_mount *fm;
 
        if (!ctx->file || !ctx->rootmode_present ||
            !ctx->user_id_present || !ctx->group_id_present)
@@ -1580,42 +1566,18 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
         * Require mount to happen from the same user namespace which
         * opened /dev/fuse to prevent potential attacks.
         */
-       err = -EINVAL;
        if ((ctx->file->f_op != &fuse_dev_operations) ||
            (ctx->file->f_cred->user_ns != sb->s_user_ns))
-               goto err;
+               return -EINVAL;
        ctx->fudptr = &ctx->file->private_data;
 
-       fc = kmalloc(sizeof(*fc), GFP_KERNEL);
-       err = -ENOMEM;
-       if (!fc)
-               goto err;
-
-       fm = kzalloc(sizeof(*fm), GFP_KERNEL);
-       if (!fm) {
-               kfree(fc);
-               goto err;
-       }
-
-       fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
-       fc->release = fuse_free_conn;
-
-       sb->s_fs_info = fm;
-
        err = fuse_fill_super_common(sb, ctx);
        if (err)
-               goto err_put_conn;
+               return err;
        /* file->private_data shall be visible on all CPUs after this */
        smp_mb();
        fuse_send_init(get_fuse_mount_super(sb));
        return 0;
-
- err_put_conn:
-       fuse_conn_put(fc);
-       kfree(fm);
-       sb->s_fs_info = NULL;
- err:
-       return err;
 }
 
 /*
@@ -1637,22 +1599,40 @@ static int fuse_get_tree(struct fs_context *fsc)
 {
        struct fuse_fs_context *ctx = fsc->fs_private;
        struct fuse_dev *fud;
+       struct fuse_conn *fc;
+       struct fuse_mount *fm;
        struct super_block *sb;
        int err;
 
+       fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+       if (!fc)
+               return -ENOMEM;
+
+       fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+       if (!fm) {
+               kfree(fc);
+               return -ENOMEM;
+       }
+
+       fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
+       fc->release = fuse_free_conn;
+
+       fsc->s_fs_info = fm;
+
        if (ctx->fd_present)
                ctx->file = fget(ctx->fd);
 
        if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
                err = get_tree_bdev(fsc, fuse_fill_super);
-               goto out_fput;
+               goto out;
        }
        /*
         * While block dev mount can be initialized with a dummy device fd
         * (found by device name), normal fuse mounts can't
         */
+       err = -EINVAL;
        if (!ctx->file)
-               return -EINVAL;
+               goto out;
 
        /*
         * Allow creating a fuse mount with an already initialized fuse
@@ -1668,7 +1648,9 @@ static int fuse_get_tree(struct fs_context *fsc)
        } else {
                err = get_tree_nodev(fsc, fuse_fill_super);
        }
-out_fput:
+out:
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
        if (ctx->file)
                fput(ctx->file);
        return err;
@@ -1747,17 +1729,25 @@ static void fuse_sb_destroy(struct super_block *sb)
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;
 
-       if (fm) {
+       if (sb->s_root) {
                last = fuse_mount_remove(fm);
                if (last)
                        fuse_conn_destroy(fm);
        }
 }
 
+void fuse_mount_destroy(struct fuse_mount *fm)
+{
+       fuse_conn_put(fm->fc);
+       kfree(fm);
+}
+EXPORT_SYMBOL(fuse_mount_destroy);
+
 static void fuse_kill_sb_anon(struct super_block *sb)
 {
        fuse_sb_destroy(sb);
        kill_anon_super(sb);
+       fuse_mount_destroy(get_fuse_mount_super(sb));
 }
 
 static struct file_system_type fuse_fs_type = {
@@ -1775,6 +1765,7 @@ static void fuse_kill_sb_blk(struct super_block *sb)
 {
        fuse_sb_destroy(sb);
        kill_block_super(sb);
+       fuse_mount_destroy(get_fuse_mount_super(sb));
 }
 
 static struct file_system_type fuseblk_fs_type = {
index 0ad89c6..94fc874 100644 (file)
@@ -1394,12 +1394,13 @@ static void virtio_kill_sb(struct super_block *sb)
        bool last;
 
        /* If mount failed, we can still be called without any fc */
-       if (fm) {
+       if (sb->s_root) {
                last = fuse_mount_remove(fm);
                if (last)
                        virtio_fs_conn_destroy(fm);
        }
        kill_anon_super(sb);
+       fuse_mount_destroy(fm);
 }
 
 static int virtio_fs_test_super(struct super_block *sb,
@@ -1455,19 +1456,14 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
 
        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
-       if (fsc->s_fs_info) {
-               fuse_conn_put(fc);
-               kfree(fm);
-       }
+       if (fsc->s_fs_info)
+               fuse_mount_destroy(fm);
        if (IS_ERR(sb))
                return PTR_ERR(sb);
 
        if (!sb->s_root) {
                err = virtio_fs_fill_super(sb, fsc);
                if (err) {
-                       fuse_conn_put(fc);
-                       kfree(fm);
-                       sb->s_fs_info = NULL;
                        deactivate_locked_super(sb);
                        return err;
                }
index 5bf8aa8..422a7ed 100644 (file)
@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers == acct->max_workers) {
+       if (acct->nr_workers >= acct->max_workers) {
                raw_spin_unlock(&wqe->lock);
                return true;
        }
@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 
        rcu_read_lock();
        for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
                struct io_wqe_acct *acct;
 
+               raw_spin_lock(&wqe->lock);
                for (i = 0; i < IO_WQ_ACCT_NR; i++) {
-                       acct = &wq->wqes[node]->acct[i];
+                       acct = &wqe->acct[i];
                        prev = max_t(int, acct->max_workers, prev);
                        if (new_count[i])
                                acct->max_workers = new_count[i];
                        new_count[i] = prev;
                }
+               raw_spin_unlock(&wqe->lock);
        }
        rcu_read_unlock();
        return 0;
index e68d278..bc18af5 100644 (file)
@@ -456,6 +456,8 @@ struct io_ring_ctx {
                struct work_struct              exit_work;
                struct list_head                tctx_list;
                struct completion               ref_comp;
+               u32                             iowq_limits[2];
+               bool                            iowq_limits_set;
        };
 };
 
@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
        }
 }
 
-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
-       req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
        if (WARN_ON_ONCE(!req->link))
@@ -6983,7 +6980,7 @@ issue_sqe:
                switch (io_arm_poll_handler(req)) {
                case IO_APOLL_READY:
                        if (linked_timeout)
-                               io_unprep_linked_timeout(req);
+                               io_queue_linked_timeout(linked_timeout);
                        goto issue_sqe;
                case IO_APOLL_ABORTED:
                        /*
@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
                ret = io_uring_alloc_task_context(current, ctx);
                if (unlikely(ret))
                        return ret;
+
                tctx = current->io_uring;
+               if (ctx->iowq_limits_set) {
+                       unsigned int limits[2] = { ctx->iowq_limits[0],
+                                                  ctx->iowq_limits[1], };
+
+                       ret = io_wq_max_workers(tctx->io_wq, limits);
+                       if (ret)
+                               return ret;
+               }
        }
        if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
                node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 
 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                        void __user *arg)
+       __must_hold(&ctx->uring_lock)
 {
+       struct io_tctx_node *node;
        struct io_uring_task *tctx = NULL;
        struct io_sq_data *sqd = NULL;
        __u32 new_count[2];
@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                tctx = current->io_uring;
        }
 
-       ret = -EINVAL;
-       if (!tctx || !tctx->io_wq)
-               goto err;
+       BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-       ret = io_wq_max_workers(tctx->io_wq, new_count);
-       if (ret)
-               goto err;
+       memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+       ctx->iowq_limits_set = true;
+
+       ret = -EINVAL;
+       if (tctx && tctx->io_wq) {
+               ret = io_wq_max_workers(tctx->io_wq, new_count);
+               if (ret)
+                       goto err;
+       } else {
+               memset(new_count, 0, sizeof(new_count));
+       }
 
        if (sqd) {
                mutex_unlock(&sqd->lock);
@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
+       /* that's it for SQPOLL, only the SQPOLL task creates requests */
+       if (sqd)
+               return 0;
+
+       /* now propagate the restriction to all registered users */
+       list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+               struct io_uring_task *tctx = node->task->io_uring;
+
+               if (WARN_ON_ONCE(!tctx->io_wq))
+                       continue;
+
+               for (i = 0; i < ARRAY_SIZE(new_count); i++)
+                       new_count[i] = ctx->iowq_limits[i];
+               /* ignore errors, it always returns zero anyway */
+               (void)io_wq_max_workers(tctx->io_wq, new_count);
+       }
        return 0;
 err:
        if (sqd) {
index 87aac4c..1b07550 100644 (file)
@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
        struct fd f = fdget(fd);
        int ret = -EBADF;
 
-       if (!f.file)
+       if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;
 
        ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
index 71c989f..30a92dd 100644 (file)
@@ -298,8 +298,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
                                   int blob_len, struct ksmbd_session *sess)
 {
        char *domain_name;
-       unsigned int lm_off, nt_off;
-       unsigned short nt_len;
+       unsigned int nt_off, dn_off;
+       unsigned short nt_len, dn_len;
        int ret;
 
        if (blob_len < sizeof(struct authenticate_message)) {
@@ -314,15 +314,17 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
                return -EINVAL;
        }
 
-       lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
        nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
        nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
+       dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
+       dn_len = le16_to_cpu(authblob->DomainName.Length);
+
+       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+               return -EINVAL;
 
        /* TODO : use domain name that imported from configuration file */
-       domain_name = smb_strndup_from_utf16((const char *)authblob +
-                       le32_to_cpu(authblob->DomainName.BufferOffset),
-                       le16_to_cpu(authblob->DomainName.Length), true,
-                       sess->conn->local_nls);
+       domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
+                                            dn_len, true, sess->conn->local_nls);
        if (IS_ERR(domain_name))
                return PTR_ERR(domain_name);
 
index 48b18b4..b57a0d8 100644 (file)
@@ -61,6 +61,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
                conn->local_nls = load_nls_default();
        atomic_set(&conn->req_running, 0);
        atomic_set(&conn->r_count, 0);
+       conn->total_credits = 1;
+
        init_waitqueue_head(&conn->req_running_q);
        INIT_LIST_HEAD(&conn->conns_list);
        INIT_LIST_HEAD(&conn->sessions);
index 2fbe2bc..c6718a0 100644 (file)
@@ -211,6 +211,7 @@ struct ksmbd_tree_disconnect_request {
  */
 struct ksmbd_logout_request {
        __s8    account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+       __u32   account_flags;
 };
 
 /*
@@ -317,6 +318,7 @@ enum KSMBD_TREE_CONN_STATUS {
 #define KSMBD_USER_FLAG_BAD_UID                BIT(2)
 #define KSMBD_USER_FLAG_BAD_USER       BIT(3)
 #define KSMBD_USER_FLAG_GUEST_ACCOUNT  BIT(4)
+#define KSMBD_USER_FLAG_DELAY_SESSION  BIT(5)
 
 /*
  * Share config flags.
index d21629a..1019d36 100644 (file)
@@ -55,7 +55,7 @@ struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
 
 void ksmbd_free_user(struct ksmbd_user *user)
 {
-       ksmbd_ipc_logout_request(user->name);
+       ksmbd_ipc_logout_request(user->name, user->flags);
        kfree(user->name);
        kfree(user->passkey);
        kfree(user);
index b2bb074..aff80b0 100644 (file)
@@ -18,6 +18,7 @@ struct ksmbd_user {
 
        size_t                  passkey_sz;
        char                    *passkey;
+       unsigned int            failed_login_count;
 };
 
 static inline bool user_guest(struct ksmbd_user *user)
index 9edd9c1..030ca57 100644 (file)
@@ -284,11 +284,13 @@ static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
                le32_to_cpu(h->MaxOutputResponse);
 }
 
-static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
+static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
+                                      struct smb2_hdr *hdr)
 {
-       int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
-       int credit_charge = le16_to_cpu(hdr->CreditCharge);
+       unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
+       unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
        void *__hdr = hdr;
+       int ret;
 
        switch (hdr->Command) {
        case SMB2_QUERY_INFO:
@@ -310,21 +312,37 @@ static int smb2_validate_credit_charge(struct smb2_hdr *hdr)
                req_len = smb2_ioctl_req_len(__hdr);
                expect_resp_len = smb2_ioctl_resp_len(__hdr);
                break;
-       default:
+       case SMB2_CANCEL:
                return 0;
+       default:
+               req_len = 1;
+               break;
        }
 
-       credit_charge = max(1, credit_charge);
-       max_len = max(req_len, expect_resp_len);
+       credit_charge = max_t(unsigned short, credit_charge, 1);
+       max_len = max_t(unsigned int, req_len, expect_resp_len);
        calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
 
        if (credit_charge < calc_credit_num) {
-               pr_err("Insufficient credit charge, given: %d, needed: %d\n",
-                      credit_charge, calc_credit_num);
+               ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
+                           credit_charge, calc_credit_num);
+               return 1;
+       } else if (credit_charge > conn->max_credits) {
+               ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
                return 1;
        }
 
-       return 0;
+       spin_lock(&conn->credits_lock);
+       if (credit_charge <= conn->total_credits) {
+               conn->total_credits -= credit_charge;
+               ret = 0;
+       } else {
+               ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+                           credit_charge, conn->total_credits);
+               ret = 1;
+       }
+       spin_unlock(&conn->credits_lock);
+       return ret;
 }
 
 int ksmbd_smb2_check_message(struct ksmbd_work *work)
@@ -382,26 +400,20 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                }
        }
 
-       if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
-           smb2_validate_credit_charge(hdr)) {
-               work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
-               return 1;
-       }
-
        if (smb2_calc_size(hdr, &clc_len))
                return 1;
 
        if (len != clc_len) {
                /* client can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
-                       return 0;
+                       goto validate_credit;
 
                /*
                 * Some windows servers (win2016) will pad also the final
                 * PDU in a compound to 8 bytes.
                 */
                if (ALIGN(clc_len, 8) == len)
-                       return 0;
+                       goto validate_credit;
 
                /*
                 * windows client also pad up to 8 bytes when compounding.
@@ -414,7 +426,7 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                                    "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
                                    len, clc_len, command,
                                    le64_to_cpu(hdr->MessageId));
-                       return 0;
+                       goto validate_credit;
                }
 
                ksmbd_debug(SMB,
@@ -425,6 +437,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 1;
        }
 
+validate_credit:
+       if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+           smb2_validate_credit_charge(work->conn, hdr)) {
+               work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+               return 1;
+       }
+
        return 0;
 }
 
index b06456e..fb6a65d 100644 (file)
@@ -284,6 +284,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
 
 void init_smb2_max_read_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_read_size = sz;
        smb30_server_values.max_read_size = sz;
        smb302_server_values.max_read_size = sz;
@@ -292,6 +293,7 @@ void init_smb2_max_read_size(unsigned int sz)
 
 void init_smb2_max_write_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_write_size = sz;
        smb30_server_values.max_write_size = sz;
        smb302_server_values.max_write_size = sz;
@@ -300,6 +302,7 @@ void init_smb2_max_write_size(unsigned int sz)
 
 void init_smb2_max_trans_size(unsigned int sz)
 {
+       sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
        smb21_server_values.max_trans_size = sz;
        smb30_server_values.max_trans_size = sz;
        smb302_server_values.max_trans_size = sz;
index 005aa93..7e448df 100644 (file)
@@ -292,22 +292,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
        return 0;
 }
 
-static int smb2_consume_credit_charge(struct ksmbd_work *work,
-                                     unsigned short credit_charge)
-{
-       struct ksmbd_conn *conn = work->conn;
-       unsigned int rsp_credits = 1;
-
-       if (!conn->total_credits)
-               return 0;
-
-       if (credit_charge > 0)
-               rsp_credits = credit_charge;
-
-       conn->total_credits -= rsp_credits;
-       return rsp_credits;
-}
-
 /**
  * smb2_set_rsp_credits() - set number of credits in response buffer
  * @work:      smb work containing smb response buffer
@@ -317,49 +301,43 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
        struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
        struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
        struct ksmbd_conn *conn = work->conn;
-       unsigned short credits_requested = le16_to_cpu(req_hdr->CreditRequest);
-       unsigned short credit_charge = 1, credits_granted = 0;
-       unsigned short aux_max, aux_credits, min_credits;
-       int rsp_credit_charge;
+       unsigned short credits_requested;
+       unsigned short credit_charge, credits_granted = 0;
+       unsigned short aux_max, aux_credits;
 
-       if (hdr->Command == SMB2_CANCEL)
-               goto out;
+       if (work->send_no_response)
+               return 0;
 
-       /* get default minimum credits by shifting maximum credits by 4 */
-       min_credits = conn->max_credits >> 4;
+       hdr->CreditCharge = req_hdr->CreditCharge;
 
-       if (conn->total_credits >= conn->max_credits) {
+       if (conn->total_credits > conn->max_credits) {
+               hdr->CreditRequest = 0;
                pr_err("Total credits overflow: %d\n", conn->total_credits);
-               conn->total_credits = min_credits;
-       }
-
-       rsp_credit_charge =
-               smb2_consume_credit_charge(work, le16_to_cpu(req_hdr->CreditCharge));
-       if (rsp_credit_charge < 0)
                return -EINVAL;
+       }
 
-       hdr->CreditCharge = cpu_to_le16(rsp_credit_charge);
+       credit_charge = max_t(unsigned short,
+                             le16_to_cpu(req_hdr->CreditCharge), 1);
+       credits_requested = max_t(unsigned short,
+                                 le16_to_cpu(req_hdr->CreditRequest), 1);
 
-       if (credits_requested > 0) {
-               aux_credits = credits_requested - 1;
-               aux_max = 32;
-               if (hdr->Command == SMB2_NEGOTIATE)
-                       aux_max = 0;
-               aux_credits = (aux_credits < aux_max) ? aux_credits : aux_max;
-               credits_granted = aux_credits + credit_charge;
+       /* according to smb2.credits smbtorture, Windows server
+        * 2016 or later grant up to 8192 credits at once.
+        *
+        * TODO: Need to adjuct CreditRequest value according to
+        * current cpu load
+        */
+       aux_credits = credits_requested - 1;
+       if (hdr->Command == SMB2_NEGOTIATE)
+               aux_max = 0;
+       else
+               aux_max = conn->max_credits - credit_charge;
+       aux_credits = min_t(unsigned short, aux_credits, aux_max);
+       credits_granted = credit_charge + aux_credits;
 
-               /* if credits granted per client is getting bigger than default
-                * minimum credits then we should wrap it up within the limits.
-                */
-               if ((conn->total_credits + credits_granted) > min_credits)
-                       credits_granted = min_credits - conn->total_credits;
-               /*
-                * TODO: Need to adjuct CreditRequest value according to
-                * current cpu load
-                */
-       } else if (conn->total_credits == 0) {
-               credits_granted = 1;
-       }
+       if (conn->max_credits - conn->total_credits < credits_granted)
+               credits_granted = conn->max_credits -
+                       conn->total_credits;
 
        conn->total_credits += credits_granted;
        work->credits_granted += credits_granted;
@@ -368,7 +346,6 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
                /* Update CreditRequest in last request */
                hdr->CreditRequest = cpu_to_le16(work->credits_granted);
        }
-out:
        ksmbd_debug(SMB,
                    "credits: requested[%d] granted[%d] total_granted[%d]\n",
                    credits_requested, credits_granted,
@@ -472,6 +449,12 @@ bool is_chained_smb2_message(struct ksmbd_work *work)
                        return false;
                }
 
+               if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
+                   work->response_sz) {
+                       pr_err("next response offset exceeds response buffer size\n");
+                       return false;
+               }
+
                ksmbd_debug(SMB, "got SMB2 chained command\n");
                init_chained_smb2_rsp(work);
                return true;
@@ -541,7 +524,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
 {
        struct smb2_hdr *hdr = work->request_buf;
        size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
-       size_t large_sz = work->conn->vals->max_trans_size + MAX_SMB2_HDR_SIZE;
+       size_t large_sz = small_sz + work->conn->vals->max_trans_size;
        size_t sz = small_sz;
        int cmd = le16_to_cpu(hdr->Command);
 
@@ -1274,19 +1257,13 @@ static int generate_preauth_hash(struct ksmbd_work *work)
        return 0;
 }
 
-static int decode_negotiation_token(struct ksmbd_work *work,
-                                   struct negotiate_message *negblob)
+static int decode_negotiation_token(struct ksmbd_conn *conn,
+                                   struct negotiate_message *negblob,
+                                   size_t sz)
 {
-       struct ksmbd_conn *conn = work->conn;
-       struct smb2_sess_setup_req *req;
-       int sz;
-
        if (!conn->use_spnego)
                return -EINVAL;
 
-       req = work->request_buf;
-       sz = le16_to_cpu(req->SecurityBufferLength);
-
        if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
                if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
                        conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
@@ -1298,9 +1275,9 @@ static int decode_negotiation_token(struct ksmbd_work *work,
 }
 
 static int ntlm_negotiate(struct ksmbd_work *work,
-                         struct negotiate_message *negblob)
+                         struct negotiate_message *negblob,
+                         size_t negblob_len)
 {
-       struct smb2_sess_setup_req *req = work->request_buf;
        struct smb2_sess_setup_rsp *rsp = work->response_buf;
        struct challenge_message *chgblob;
        unsigned char *spnego_blob = NULL;
@@ -1309,8 +1286,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
        int sz, rc;
 
        ksmbd_debug(SMB, "negotiate phase\n");
-       sz = le16_to_cpu(req->SecurityBufferLength);
-       rc = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, work->sess);
+       rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->sess);
        if (rc)
                return rc;
 
@@ -1378,12 +1354,23 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        struct authenticate_message *authblob;
        struct ksmbd_user *user;
        char *name;
-       int sz;
+       unsigned int auth_msg_len, name_off, name_len, secbuf_len;
 
+       secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+       if (secbuf_len < sizeof(struct authenticate_message)) {
+               ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+               return NULL;
+       }
        authblob = user_authblob(conn, req);
-       sz = le32_to_cpu(authblob->UserName.BufferOffset);
-       name = smb_strndup_from_utf16((const char *)authblob + sz,
-                                     le16_to_cpu(authblob->UserName.Length),
+       name_off = le32_to_cpu(authblob->UserName.BufferOffset);
+       name_len = le16_to_cpu(authblob->UserName.Length);
+       auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
+
+       if (auth_msg_len < (u64)name_off + name_len)
+               return NULL;
+
+       name = smb_strndup_from_utf16((const char *)authblob + name_off,
+                                     name_len,
                                      true,
                                      conn->local_nls);
        if (IS_ERR(name)) {
@@ -1629,6 +1616,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
        struct smb2_sess_setup_rsp *rsp = work->response_buf;
        struct ksmbd_session *sess;
        struct negotiate_message *negblob;
+       unsigned int negblob_len, negblob_off;
        int rc = 0;
 
        ksmbd_debug(SMB, "Received request for session setup\n");
@@ -1709,10 +1697,16 @@ int smb2_sess_setup(struct ksmbd_work *work)
        if (sess->state == SMB2_SESSION_EXPIRED)
                sess->state = SMB2_SESSION_IN_PROGRESS;
 
+       negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+       negblob_len = le16_to_cpu(req->SecurityBufferLength);
+       if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
+           negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
+               return -EINVAL;
+
        negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
-                       le16_to_cpu(req->SecurityBufferOffset));
+                       negblob_off);
 
-       if (decode_negotiation_token(work, negblob) == 0) {
+       if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
                if (conn->mechToken)
                        negblob = (struct negotiate_message *)conn->mechToken;
        }
@@ -1736,7 +1730,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
                        sess->Preauth_HashValue = NULL;
                } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
                        if (negblob->MessageType == NtLmNegotiate) {
-                               rc = ntlm_negotiate(work, negblob);
+                               rc = ntlm_negotiate(work, negblob, negblob_len);
                                if (rc)
                                        goto out_err;
                                rsp->hdr.Status =
@@ -1796,9 +1790,30 @@ out_err:
                conn->mechToken = NULL;
        }
 
-       if (rc < 0 && sess) {
-               ksmbd_session_destroy(sess);
-               work->sess = NULL;
+       if (rc < 0) {
+               /*
+                * SecurityBufferOffset should be set to zero
+                * in session setup error response.
+                */
+               rsp->SecurityBufferOffset = 0;
+
+               if (sess) {
+                       bool try_delay = false;
+
+                       /*
+                        * To avoid dictionary attacks (repeated session setups rapidly sent) to
+                        * connect to server, ksmbd make a delay of a 5 seconds on session setup
+                        * failure to make it harder to send enough random connection requests
+                        * to break into a server.
+                        */
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
+
+                       ksmbd_session_destroy(sess);
+                       work->sess = NULL;
+                       if (try_delay)
+                               ssleep(5);
+               }
        }
 
        return rc;
@@ -3779,6 +3794,24 @@ static int verify_info_level(int info_level)
        return 0;
 }
 
+static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
+                                    unsigned short hdr2_len,
+                                    unsigned int out_buf_len)
+{
+       int free_len;
+
+       if (out_buf_len > work->conn->vals->max_trans_size)
+               return -EINVAL;
+
+       free_len = (int)(work->response_sz -
+                        (get_rfc1002_len(work->response_buf) + 4)) -
+               hdr2_len;
+       if (free_len < 0)
+               return -EINVAL;
+
+       return min_t(int, out_buf_len, free_len);
+}
+
 int smb2_query_dir(struct ksmbd_work *work)
 {
        struct ksmbd_conn *conn = work->conn;
@@ -3855,9 +3888,13 @@ int smb2_query_dir(struct ksmbd_work *work)
        memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
        d_info.wptr = (char *)rsp->Buffer;
        d_info.rptr = (char *)rsp->Buffer;
-       d_info.out_buf_len = (work->response_sz - (get_rfc1002_len(rsp_org) + 4));
-       d_info.out_buf_len = min_t(int, d_info.out_buf_len, le32_to_cpu(req->OutputBufferLength)) -
-               sizeof(struct smb2_query_directory_rsp);
+       d_info.out_buf_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (d_info.out_buf_len < 0) {
+               rc = -EINVAL;
+               goto err_out;
+       }
        d_info.flags = srch_flag;
 
        /*
@@ -4091,12 +4128,11 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
                                    le32_to_cpu(req->Flags));
        }
 
-       buf_free_len = work->response_sz -
-                       (get_rfc1002_len(rsp_org) + 4) -
-                       sizeof(struct smb2_query_info_rsp);
-
-       if (le32_to_cpu(req->OutputBufferLength) < buf_free_len)
-               buf_free_len = le32_to_cpu(req->OutputBufferLength);
+       buf_free_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (buf_free_len < 0)
+               return -EINVAL;
 
        rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
        if (rc < 0) {
@@ -4407,6 +4443,8 @@ static void get_file_stream_info(struct ksmbd_work *work,
        struct path *path = &fp->filp->f_path;
        ssize_t xattr_list_len;
        int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+       int buf_free_len;
+       struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
 
        generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
                         &stat);
@@ -4420,6 +4458,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
                goto out;
        }
 
+       buf_free_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (buf_free_len < 0)
+               goto out;
+
        while (idx < xattr_list_len) {
                stream_name = xattr_list + idx;
                streamlen = strlen(stream_name);
@@ -4444,6 +4488,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
                streamlen = snprintf(stream_buf, streamlen + 1,
                                     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
 
+               next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
+               if (next > buf_free_len)
+                       break;
+
                file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
                streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
                                               stream_buf, streamlen,
@@ -4454,12 +4502,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
                file_info->StreamSize = cpu_to_le64(stream_name_len);
                file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
 
-               next = sizeof(struct smb2_file_stream_info) + streamlen;
                nbytes += next;
+               buf_free_len -= next;
                file_info->NextEntryOffset = cpu_to_le32(next);
        }
 
-       if (!S_ISDIR(stat.mode)) {
+       if (!S_ISDIR(stat.mode) &&
+           buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
                file_info = (struct smb2_file_stream_info *)
                        &rsp->Buffer[nbytes];
                streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -6220,8 +6269,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
            (offsetof(struct smb2_write_req, Buffer) - 4)) {
                data_buf = (char *)&req->Buffer[0];
        } else {
-               if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-                   (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+               if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
                        pr_err("invalid write data offset %u, smb_len %u\n",
                               le16_to_cpu(req->DataOffset),
                               get_rfc1002_len(req));
@@ -6379,8 +6427,7 @@ int smb2_write(struct ksmbd_work *work)
                    (offsetof(struct smb2_write_req, Buffer) - 4)) {
                        data_buf = (char *)&req->Buffer[0];
                } else {
-                       if ((le16_to_cpu(req->DataOffset) > get_rfc1002_len(req)) ||
-                           (le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req))) {
+                       if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
                                pr_err("invalid write data offset %u, smb_len %u\n",
                                       le16_to_cpu(req->DataOffset),
                                       get_rfc1002_len(req));
@@ -7023,24 +7070,26 @@ out2:
        return err;
 }
 
-static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
+static int fsctl_copychunk(struct ksmbd_work *work,
+                          struct copychunk_ioctl_req *ci_req,
+                          unsigned int cnt_code,
+                          unsigned int input_count,
+                          unsigned long long volatile_id,
+                          unsigned long long persistent_id,
                           struct smb2_ioctl_rsp *rsp)
 {
-       struct copychunk_ioctl_req *ci_req;
        struct copychunk_ioctl_rsp *ci_rsp;
        struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
        struct srv_copychunk *chunks;
        unsigned int i, chunk_count, chunk_count_written = 0;
        unsigned int chunk_size_written = 0;
        loff_t total_size_written = 0;
-       int ret, cnt_code;
+       int ret = 0;
 
-       cnt_code = le32_to_cpu(req->CntCode);
-       ci_req = (struct copychunk_ioctl_req *)&req->Buffer[0];
        ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
 
-       rsp->VolatileFileId = req->VolatileFileId;
-       rsp->PersistentFileId = req->PersistentFileId;
+       rsp->VolatileFileId = cpu_to_le64(volatile_id);
+       rsp->PersistentFileId = cpu_to_le64(persistent_id);
        ci_rsp->ChunksWritten =
                cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
        ci_rsp->ChunkBytesWritten =
@@ -7050,12 +7099,13 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
        chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
        chunk_count = le32_to_cpu(ci_req->ChunkCount);
+       if (chunk_count == 0)
+               goto out;
        total_size_written = 0;
 
        /* verify the SRV_COPYCHUNK_COPY packet */
        if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
-           le32_to_cpu(req->InputCount) <
-            offsetof(struct copychunk_ioctl_req, Chunks) +
+           input_count < offsetof(struct copychunk_ioctl_req, Chunks) +
             chunk_count * sizeof(struct srv_copychunk)) {
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                return -EINVAL;
@@ -7076,9 +7126,7 @@ static int fsctl_copychunk(struct ksmbd_work *work, struct smb2_ioctl_req *req,
 
        src_fp = ksmbd_lookup_foreign_fd(work,
                                         le64_to_cpu(ci_req->ResumeKey[0]));
-       dst_fp = ksmbd_lookup_fd_slow(work,
-                                     le64_to_cpu(req->VolatileFileId),
-                                     le64_to_cpu(req->PersistentFileId));
+       dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
        ret = -EINVAL;
        if (!src_fp ||
            src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
@@ -7153,8 +7201,8 @@ static __be32 idev_ipv4_address(struct in_device *idev)
 }
 
 static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
-                                       struct smb2_ioctl_req *req,
-                                       struct smb2_ioctl_rsp *rsp)
+                                       struct smb2_ioctl_rsp *rsp,
+                                       unsigned int out_buf_len)
 {
        struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
        int nbytes = 0;
@@ -7166,6 +7214,12 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
        rtnl_lock();
        for_each_netdev(&init_net, netdev) {
+               if (out_buf_len <
+                   nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
+                       rtnl_unlock();
+                       return -ENOSPC;
+               }
+
                if (netdev->type == ARPHRD_LOOPBACK)
                        continue;
 
@@ -7245,11 +7299,6 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
        if (nii_rsp)
                nii_rsp->Next = 0;
 
-       if (!nbytes) {
-               rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
-               return -EINVAL;
-       }
-
        rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
        rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
        return nbytes;
@@ -7257,11 +7306,16 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
 
 static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
                                         struct validate_negotiate_info_req *neg_req,
-                                        struct validate_negotiate_info_rsp *neg_rsp)
+                                        struct validate_negotiate_info_rsp *neg_rsp,
+                                        unsigned int in_buf_len)
 {
        int ret = 0;
        int dialect;
 
+       if (in_buf_len < sizeof(struct validate_negotiate_info_req) +
+                       le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
+               return -EINVAL;
+
        dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
                                             neg_req->DialectCount);
        if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
@@ -7295,7 +7349,7 @@ err_out:
 static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
                                        struct file_allocated_range_buffer *qar_req,
                                        struct file_allocated_range_buffer *qar_rsp,
-                                       int in_count, int *out_count)
+                                       unsigned int in_count, unsigned int *out_count)
 {
        struct ksmbd_file *fp;
        loff_t start, length;
@@ -7322,7 +7376,8 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
 }
 
 static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
-                                int out_buf_len, struct smb2_ioctl_req *req,
+                                unsigned int out_buf_len,
+                                struct smb2_ioctl_req *req,
                                 struct smb2_ioctl_rsp *rsp)
 {
        struct ksmbd_rpc_command *rpc_resp;
@@ -7436,8 +7491,7 @@ int smb2_ioctl(struct ksmbd_work *work)
 {
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp, *rsp_org;
-       int cnt_code, nbytes = 0;
-       int out_buf_len;
+       unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
        u64 id = KSMBD_NO_FID;
        struct ksmbd_conn *conn = work->conn;
        int ret = 0;
@@ -7465,8 +7519,14 @@ int smb2_ioctl(struct ksmbd_work *work)
        }
 
        cnt_code = le32_to_cpu(req->CntCode);
-       out_buf_len = le32_to_cpu(req->MaxOutputResponse);
-       out_buf_len = min(KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
+       ret = smb2_calc_max_out_buf_len(work, 48,
+                                       le32_to_cpu(req->MaxOutputResponse));
+       if (ret < 0) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               goto out;
+       }
+       out_buf_len = (unsigned int)ret;
+       in_buf_len = le32_to_cpu(req->InputCount);
 
        switch (cnt_code) {
        case FSCTL_DFS_GET_REFERRALS:
@@ -7494,6 +7554,7 @@ int smb2_ioctl(struct ksmbd_work *work)
                break;
        }
        case FSCTL_PIPE_TRANSCEIVE:
+               out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
                nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
                break;
        case FSCTL_VALIDATE_NEGOTIATE_INFO:
@@ -7502,9 +7563,16 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct validate_negotiate_info_req))
+                       return -EINVAL;
+
+               if (out_buf_len < sizeof(struct validate_negotiate_info_rsp))
+                       return -EINVAL;
+
                ret = fsctl_validate_negotiate_info(conn,
                        (struct validate_negotiate_info_req *)&req->Buffer[0],
-                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0]);
+                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+                       in_buf_len);
                if (ret < 0)
                        goto out;
 
@@ -7513,9 +7581,10 @@ int smb2_ioctl(struct ksmbd_work *work)
                rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
                break;
        case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
-               nbytes = fsctl_query_iface_info_ioctl(conn, req, rsp);
-               if (nbytes < 0)
+               ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
+               if (ret < 0)
                        goto out;
+               nbytes = ret;
                break;
        case FSCTL_REQUEST_RESUME_KEY:
                if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
@@ -7540,15 +7609,33 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
                        ret = -EINVAL;
                        goto out;
                }
 
                nbytes = sizeof(struct copychunk_ioctl_rsp);
-               fsctl_copychunk(work, req, rsp);
+               rsp->VolatileFileId = req->VolatileFileId;
+               rsp->PersistentFileId = req->PersistentFileId;
+               fsctl_copychunk(work,
+                               (struct copychunk_ioctl_req *)&req->Buffer[0],
+                               le32_to_cpu(req->CntCode),
+                               le32_to_cpu(req->InputCount),
+                               le64_to_cpu(req->VolatileFileId),
+                               le64_to_cpu(req->PersistentFileId),
+                               rsp);
                break;
        case FSCTL_SET_SPARSE:
+               if (in_buf_len < sizeof(struct file_sparse)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                ret = fsctl_set_sparse(work, id,
                                       (struct file_sparse *)&req->Buffer[0]);
                if (ret < 0)
@@ -7567,6 +7654,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
                }
 
+               if (in_buf_len < sizeof(struct file_zero_data_information)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                zero_data =
                        (struct file_zero_data_information *)&req->Buffer[0];
 
@@ -7586,6 +7678,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                break;
        }
        case FSCTL_QUERY_ALLOCATED_RANGES:
+               if (in_buf_len < sizeof(struct file_allocated_range_buffer)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                ret = fsctl_query_allocated_ranges(work, id,
                        (struct file_allocated_range_buffer *)&req->Buffer[0],
                        (struct file_allocated_range_buffer *)&rsp->Buffer[0],
@@ -7626,6 +7723,11 @@ int smb2_ioctl(struct ksmbd_work *work)
                struct duplicate_extents_to_file *dup_ext;
                loff_t src_off, dst_off, length, cloned;
 
+               if (in_buf_len < sizeof(struct duplicate_extents_to_file)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
 
                fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
@@ -7696,6 +7798,8 @@ out:
                rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
        else if (ret == -EOPNOTSUPP)
                rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+       else if (ret == -ENOSPC)
+               rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
        else if (ret < 0 || rsp->hdr.Status == 0)
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
        smb2_set_err_rsp(work);
index a6dec5e..ff5a2f0 100644 (file)
 #define SMB21_DEFAULT_IOSIZE   (1024 * 1024)
 #define SMB3_DEFAULT_IOSIZE    (4 * 1024 * 1024)
 #define SMB3_DEFAULT_TRANS_SIZE        (1024 * 1024)
+#define SMB3_MIN_IOSIZE        (64 * 1024)
+#define SMB3_MAX_IOSIZE        (8 * 1024 * 1024)
 
 /*
  * SMB2 Header Definition
index 44aea33..1acf189 100644 (file)
@@ -601,7 +601,7 @@ int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
        return ret;
 }
 
-int ksmbd_ipc_logout_request(const char *account)
+int ksmbd_ipc_logout_request(const char *account, int flags)
 {
        struct ksmbd_ipc_msg *msg;
        struct ksmbd_logout_request *req;
@@ -616,6 +616,7 @@ int ksmbd_ipc_logout_request(const char *account)
 
        msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
        req = (struct ksmbd_logout_request *)msg->payload;
+       req->account_flags = flags;
        strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
 
        ret = ipc_msg_send(msg);
index 9eacc89..5e5b90a 100644 (file)
@@ -25,7 +25,7 @@ ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
                               struct sockaddr *peer_addr);
 int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
                                      unsigned long long connect_id);
-int ksmbd_ipc_logout_request(const char *account);
+int ksmbd_ipc_logout_request(const char *account, int flags);
 struct ksmbd_share_config_response *
 ksmbd_ipc_share_config_request(const char *name);
 struct ksmbd_spnego_authen_response *
index 3a7fa23..a2fd5a4 100644 (file)
@@ -549,6 +549,10 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        switch (recvmsg->type) {
        case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+               if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+                       put_empty_recvmsg(t, recvmsg);
+                       return;
+               }
                t->negotiation_requested = true;
                t->full_packet_received = true;
                wake_up_interruptible(&t->wait_status);
@@ -556,10 +560,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
        case SMB_DIRECT_MSG_DATA_TRANSFER: {
                struct smb_direct_data_transfer *data_transfer =
                        (struct smb_direct_data_transfer *)recvmsg->packet;
-               int data_length = le32_to_cpu(data_transfer->data_length);
+               unsigned int data_length;
                int avail_recvmsg_count, receive_credits;
 
+               if (wc->byte_len <
+                   offsetof(struct smb_direct_data_transfer, padding)) {
+                       put_empty_recvmsg(t, recvmsg);
+                       return;
+               }
+
+               data_length = le32_to_cpu(data_transfer->data_length);
                if (data_length) {
+                       if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+                           (u64)data_length) {
+                               put_empty_recvmsg(t, recvmsg);
+                               return;
+                       }
+
                        if (t->full_packet_received)
                                recvmsg->first_segment = true;
 
@@ -568,7 +585,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
                        else
                                t->full_packet_received = true;
 
-                       enqueue_reassembly(t, recvmsg, data_length);
+                       enqueue_reassembly(t, recvmsg, (int)data_length);
                        wake_up_interruptible(&t->wait_reassembly_queue);
 
                        spin_lock(&t->receive_credit_lock);
index b419542..835b384 100644 (file)
@@ -1023,7 +1023,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
                         struct file_allocated_range_buffer *ranges,
-                        int in_count, int *out_count)
+                        unsigned int in_count, unsigned int *out_count)
 {
        struct file *f = fp->filp;
        struct inode *inode = file_inode(fp->filp);
index 7b1dcaa..b0d5b8f 100644 (file)
@@ -166,7 +166,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
 struct file_allocated_range_buffer;
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
                         struct file_allocated_range_buffer *ranges,
-                        int in_count, int *out_count);
+                        unsigned int in_count, unsigned int *out_count);
 int ksmbd_vfs_unlink(struct user_namespace *user_ns,
                     struct dentry *dir, struct dentry *dentry);
 void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
index f1cc825..5d9ae17 100644 (file)
@@ -7045,7 +7045,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
 int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                                         struct buffer_head *di_bh)
 {
-       int ret, i, has_data, num_pages = 0;
+       int ret, has_data, num_pages = 0;
        int need_free = 0;
        u32 bit_off, num;
        handle_t *handle;
@@ -7054,26 +7054,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_alloc_context *data_ac = NULL;
-       struct page **pages = NULL;
-       loff_t end = osb->s_clustersize;
+       struct page *page = NULL;
        struct ocfs2_extent_tree et;
        int did_quota = 0;
 
        has_data = i_size_read(inode) ? 1 : 0;
 
        if (has_data) {
-               pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
-                               sizeof(struct page *), GFP_NOFS);
-               if (pages == NULL) {
-                       ret = -ENOMEM;
-                       mlog_errno(ret);
-                       return ret;
-               }
-
                ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
                if (ret) {
                        mlog_errno(ret);
-                       goto free_pages;
+                       goto out;
                }
        }
 
@@ -7093,7 +7084,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        }
 
        if (has_data) {
-               unsigned int page_end;
+               unsigned int page_end = min_t(unsigned, PAGE_SIZE,
+                                                       osb->s_clustersize);
                u64 phys;
 
                ret = dquot_alloc_space_nodirty(inode,
@@ -7117,15 +7109,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                 */
                block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
 
-               /*
-                * Non sparse file systems zero on extend, so no need
-                * to do that now.
-                */
-               if (!ocfs2_sparse_alloc(osb) &&
-                   PAGE_SIZE < osb->s_clustersize)
-                       end = PAGE_SIZE;
-
-               ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
+               ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
+                                          &num_pages);
                if (ret) {
                        mlog_errno(ret);
                        need_free = 1;
@@ -7136,20 +7121,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
                 * This should populate the 1st page for us and mark
                 * it up to date.
                 */
-               ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+               ret = ocfs2_read_inline_data(inode, page, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        need_free = 1;
                        goto out_unlock;
                }
 
-               page_end = PAGE_SIZE;
-               if (PAGE_SIZE > osb->s_clustersize)
-                       page_end = osb->s_clustersize;
-
-               for (i = 0; i < num_pages; i++)
-                       ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
-                                                pages[i], i > 0, &phys);
+               ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
+                                        &phys);
        }
 
        spin_lock(&oi->ip_lock);
@@ -7180,8 +7160,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
        }
 
 out_unlock:
-       if (pages)
-               ocfs2_unlock_and_free_pages(pages, num_pages);
+       if (page)
+               ocfs2_unlock_and_free_pages(&page, num_pages);
 
 out_commit:
        if (ret < 0 && did_quota)
@@ -7205,8 +7185,6 @@ out_commit:
 out:
        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
-free_pages:
-       kfree(pages);
        return ret;
 }
 
index c86bd4e..5c914ce 100644 (file)
@@ -2167,11 +2167,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
        }
 
        if (ocfs2_clusterinfo_valid(osb)) {
+               /*
+                * ci_stack and ci_cluster in ocfs2_cluster_info may not be null
+                * terminated, so make sure no overflow happens here by using
+                * memcpy. Destination strings will always be null terminated
+                * because osb is allocated using kzalloc.
+                */
                osb->osb_stackflags =
                        OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
-               strlcpy(osb->osb_cluster_stack,
+               memcpy(osb->osb_cluster_stack,
                       OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
-                      OCFS2_STACK_LABEL_LEN + 1);
+                      OCFS2_STACK_LABEL_LEN);
                if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
                        mlog(ML_ERROR,
                             "couldn't mount because of an invalid "
@@ -2180,9 +2186,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
                        status = -EINVAL;
                        goto bail;
                }
-               strlcpy(osb->osb_cluster_name,
+               memcpy(osb->osb_cluster_name,
                        OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
-                       OCFS2_CLUSTER_NAME_LEN + 1);
+                       OCFS2_CLUSTER_NAME_LEN);
        } else {
                /* The empty string is identical with classic tools that
                 * don't know about s_cluster_info. */
index 003f0d3..22bf14a 100644 (file)
@@ -1827,9 +1827,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
        if (mode_wp && mode_dontwake)
                return -EINVAL;
 
-       ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
-                                 uffdio_wp.range.len, mode_wp,
-                                 &ctx->mmap_changing);
+       if (mmget_not_zero(ctx->mm)) {
+               ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
+                                         uffdio_wp.range.len, mode_wp,
+                                         &ctx->mmap_changing);
+               mmput(ctx->mm);
+       } else {
+               return -ESRCH;
+       }
+
        if (ret)
                return ret;
 
index fb172a0..20ecb00 100644 (file)
@@ -22,9 +22,14 @@ typedef __builtin_va_list va_list;
 #define va_arg(v, l)            __builtin_va_arg(v, l)
 #define va_copy(d, s)           __builtin_va_copy(d, s)
 #else
+#ifdef __KERNEL__
 #include <linux/stdarg.h>
-#endif
-#endif
+#else
+/* Used to build acpi tools */
+#include <stdarg.h>
+#endif /* __KERNEL__ */
+#endif /* ACPI_USE_BUILTIN_STDARG */
+#endif /* ! va_arg */
 
 #define ACPI_INLINE             __inline__
 
index 832d8a7..9919110 100644 (file)
@@ -72,6 +72,8 @@ enum cpuhp_state {
        CPUHP_SLUB_DEAD,
        CPUHP_DEBUG_OBJ_DEAD,
        CPUHP_MM_WRITEBACK_DEAD,
+       /* Must be after CPUHP_MM_VMSTAT_DEAD */
+       CPUHP_MM_DEMOTION_DEAD,
        CPUHP_MM_VMSTAT_DEAD,
        CPUHP_SOFTIRQ_DEAD,
        CPUHP_NET_MVNETA_DEAD,
@@ -240,6 +242,8 @@ enum cpuhp_state {
        CPUHP_AP_BASE_CACHEINFO_ONLINE,
        CPUHP_AP_ONLINE_DYN,
        CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,
+       /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+       CPUHP_AP_MM_DEMOTION_ONLINE,
        CPUHP_AP_X86_HPET_ONLINE,
        CPUHP_AP_X86_KVM_CLK_ONLINE,
        CPUHP_AP_DTPM_CPU_ONLINE,
index 2aaa157..957ebec 100644 (file)
@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
 #endif
 }
 
-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
 /*
  * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
  * extra segments containing the gate DSO contents.  Dumping its
index 7efc0a7..182c606 100644 (file)
@@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
 #define register_hotmemory_notifier(nb)                register_memory_notifier(nb)
 #define unregister_hotmemory_notifier(nb)      unregister_memory_notifier(nb)
 #else
-#define hotplug_memory_notifier(fn, pri)       ({ 0; })
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+       return 0;
+}
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotmemory_notifier(nb)    ({ (void)(nb); 0; })
 #define unregister_hotmemory_notifier(nb)  ({ (void)(nb); })
index e234174..f17d210 100644 (file)
@@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
index 21c3771..988528b 100644 (file)
@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
        mapping = (struct address_space *)
                ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 
-       if (mapping != page->mapping)
+       if (!mapping || mapping != page->mapping)
                return false;
 
        return mapping->a_ops == &secretmem_aops;
index a9f9c57..fe95f09 100644 (file)
  *  When function tracing occurs, the following steps are made:
  *   If arch does not support a ftrace feature:
  *    call internal function (uses INTERNAL bits) which calls...
- *   If callback is registered to the "global" list, the list
- *    function is called and recursion checks the GLOBAL bits.
- *    then this function calls...
  *   The function callback, which can use the FTRACE bits to
  *    check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
  */
 enum {
        /* Function recursion bits */
@@ -40,12 +25,14 @@ enum {
        TRACE_FTRACE_NMI_BIT,
        TRACE_FTRACE_IRQ_BIT,
        TRACE_FTRACE_SIRQ_BIT,
+       TRACE_FTRACE_TRANSITION_BIT,
 
-       /* INTERNAL_BITs must be greater than FTRACE_BITs */
+       /* Internal use recursion bits */
        TRACE_INTERNAL_BIT,
        TRACE_INTERNAL_NMI_BIT,
        TRACE_INTERNAL_IRQ_BIT,
        TRACE_INTERNAL_SIRQ_BIT,
+       TRACE_INTERNAL_TRANSITION_BIT,
 
        TRACE_BRANCH_BIT,
 /*
@@ -86,12 +73,6 @@ enum {
         */
        TRACE_GRAPH_NOTRACE_BIT,
 
-       /*
-        * When transitioning between context, the preempt_count() may
-        * not be correct. Allow for a single recursion to cover this case.
-        */
-       TRACE_TRANSITION_BIT,
-
        /* Used to prevent recursion recording from recursing. */
        TRACE_RECORD_RECURSION_BIT,
 };
@@ -113,12 +94,10 @@ enum {
 #define TRACE_CONTEXT_BITS     4
 
 #define TRACE_FTRACE_START     TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX       ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 
 #define TRACE_LIST_START       TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX         ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 
-#define TRACE_CONTEXT_MASK     TRACE_LIST_MAX
+#define TRACE_CONTEXT_MASK     ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 
 /*
  * Used for setting context
@@ -132,6 +111,7 @@ enum {
        TRACE_CTX_IRQ,
        TRACE_CTX_SOFTIRQ,
        TRACE_CTX_NORMAL,
+       TRACE_CTX_TRANSITION,
 };
 
 static __always_inline int trace_get_context_bit(void)
@@ -160,45 +140,34 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
 #endif
 
 static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
-                                                       int start, int max)
+                                                       int start)
 {
        unsigned int val = READ_ONCE(current->trace_recursion);
        int bit;
 
-       /* A previous recursion check was made */
-       if ((val & TRACE_CONTEXT_MASK) > max)
-               return 0;
-
        bit = trace_get_context_bit() + start;
        if (unlikely(val & (1 << bit))) {
                /*
                 * It could be that preempt_count has not been updated during
                 * a switch between contexts. Allow for a single recursion.
                 */
-               bit = TRACE_TRANSITION_BIT;
+               bit = TRACE_CTX_TRANSITION + start;
                if (val & (1 << bit)) {
                        do_ftrace_record_recursion(ip, pip);
                        return -1;
                }
-       } else {
-               /* Normal check passed, clear the transition to allow it again */
-               val &= ~(1 << TRACE_TRANSITION_BIT);
        }
 
        val |= 1 << bit;
        current->trace_recursion = val;
        barrier();
 
-       return bit + 1;
+       return bit;
 }
 
 static __always_inline void trace_clear_recursion(int bit)
 {
-       if (!bit)
-               return;
-
        barrier();
-       bit--;
        trace_recursion_clear(bit);
 }
 
@@ -214,7 +183,7 @@ static __always_inline void trace_clear_recursion(int bit)
 static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
                                                         unsigned long parent_ip)
 {
-       return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+       return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
 }
 
 /**
index eb70cab..33a4240 100644 (file)
@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t
 
 long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
 bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
 bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
 
 static inline void set_rlimit_ucount_max(struct user_namespace *ns,
index a824d47..ffd2c23 100644 (file)
@@ -54,7 +54,7 @@ struct mctp_sock {
        struct sock     sk;
 
        /* bind() params */
-       int             bind_net;
+       unsigned int    bind_net;
        mctp_eid_t      bind_addr;
        __u8            bind_type;
 
index 2eb6d7c..f37c7a5 100644 (file)
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
         * Verification Tag value does not match the receiver's own
         * tag value, the receiver shall silently discard the packet...
         */
-        if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
-                return 1;
+       if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+               return 0;
 
        chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
-       return 0;
+       return 1;
 }
 
 /* Check VTAG of the packet matches the sender's own tag and the T bit is
index 3166dc1..60c3845 100644 (file)
@@ -1576,6 +1576,7 @@ struct tcp_md5sig_key {
        u8                      keylen;
        u8                      family; /* AF_INET or AF_INET6 */
        u8                      prefixlen;
+       u8                      flags;
        union tcp_md5_addr      addr;
        int                     l3index; /* set if key added with L3 scope */
        u8                      key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1622,10 @@ struct tcp_md5sig_pool {
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index);
+                  int family, u8 prefixlen, int l3index, u8 flags);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk);
 
index 52b54d1..6acd4cc 100644 (file)
@@ -10,6 +10,7 @@
 #define __UAPI_MCTP_H
 
 #include <linux/types.h>
+#include <linux/socket.h>
 
 typedef __u8                   mctp_eid_t;
 
@@ -18,11 +19,13 @@ struct mctp_addr {
 };
 
 struct sockaddr_mctp {
-       unsigned short int      smctp_family;
-       int                     smctp_network;
+       __kernel_sa_family_t    smctp_family;
+       __u16                   __smctp_pad0;
+       unsigned int            smctp_network;
        struct mctp_addr        smctp_addr;
        __u8                    smctp_type;
        __u8                    smctp_tag;
+       __u8                    __smctp_pad1;
 };
 
 #define MCTP_NET_ANY           0x0
index 8dd73a6..b1cb1db 100644 (file)
@@ -657,7 +657,7 @@ static int audit_filter_rules(struct task_struct *tsk,
                        result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
                        break;
                case AUDIT_SADDR_FAM:
-                       if (ctx->sockaddr)
+                       if (ctx && ctx->sockaddr)
                                result = audit_comparator(ctx->sockaddr->ss_family,
                                                          f->op, f->val);
                        break;
index f784e08..1ae0b49 100644 (file)
@@ -225,8 +225,6 @@ struct cred *cred_alloc_blank(void)
 #ifdef CONFIG_DEBUG_CREDENTIALS
        new->magic = CRED_MAGIC;
 #endif
-       new->ucounts = get_ucounts(&init_ucounts);
-
        if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
@@ -501,7 +499,7 @@ int commit_creds(struct cred *new)
                inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
        rcu_assign_pointer(task->real_cred, new);
        rcu_assign_pointer(task->cred, new);
-       if (new->user != old->user)
+       if (new->user != old->user || new->user_ns != old->user_ns)
                dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
        alter_cred_subscribers(old, -2);
 
@@ -669,7 +667,7 @@ int set_cred_ucounts(struct cred *new)
 {
        struct task_struct *task = current;
        const struct cred *old = task->real_cred;
-       struct ucounts *old_ucounts = new->ucounts;
+       struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
 
        if (new->user == old->user && new->user_ns == old->user_ns)
                return 0;
@@ -681,9 +679,10 @@ int set_cred_ucounts(struct cred *new)
        if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
                return 0;
 
-       if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
+       if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
                return -EAGAIN;
 
+       new->ucounts = new_ucounts;
        if (old_ucounts)
                put_ucounts(old_ucounts);
 
index 95445bd..7a14ca2 100644 (file)
@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
        struct hash_bucket *bucket;
        unsigned long flags;
@@ -566,7 +566,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
        if (rc == -ENOMEM) {
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
-       } else if (rc == -EEXIST) {
+       } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                err_printk(entry->dev, entry,
                        "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
@@ -1191,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                       size_t size, int direction, dma_addr_t dma_addr)
+                       size_t size, int direction, dma_addr_t dma_addr,
+                       unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1222,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                check_for_illegal_area(dev, addr, size);
        }
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -1280,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 
 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                     int nents, int mapped_ents, int direction)
+                     int nents, int mapped_ents, int direction,
+                     unsigned long attrs)
 {
        struct dma_debug_entry *entry;
        struct scatterlist *s;
@@ -1289,6 +1291,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
        if (unlikely(dma_debug_disabled()))
                return;
 
+       for_each_sg(sg, s, nents, i) {
+               check_for_stack(dev, sg_page(s), s->offset);
+               if (!PageHighMem(sg_page(s)))
+                       check_for_illegal_area(dev, sg_virt(s), s->length);
+       }
+
        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
@@ -1304,15 +1312,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;
 
-               check_for_stack(dev, sg_page(s), s->offset);
-
-               if (!PageHighMem(sg_page(s))) {
-                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-               }
-
                check_sg_segment(dev, s);
 
-               add_dma_entry(entry);
+               add_dma_entry(entry, attrs);
        }
 }
 
@@ -1368,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }
 
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                             dma_addr_t dma_addr, void *virt)
+                             dma_addr_t dma_addr, void *virt,
+                             unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1398,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
        else
                entry->pfn = page_to_pfn(virt_to_page(virt));
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
@@ -1429,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 }
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                           int direction, dma_addr_t dma_addr)
+                           int direction, dma_addr_t dma_addr,
+                           unsigned long attrs)
 {
        struct dma_debug_entry *entry;
 
@@ -1449,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
        entry->direction        = direction;
        entry->map_err_type     = MAP_ERR_NOT_CHECKED;
 
-       add_dma_entry(entry);
+       add_dma_entry(entry, attrs);
 }
 
 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
index 83643b3..f525197 100644 (file)
 #ifdef CONFIG_DMA_API_DEBUG
 extern void debug_dma_map_page(struct device *dev, struct page *page,
                               size_t offset, size_t size,
-                              int direction, dma_addr_t dma_addr);
+                              int direction, dma_addr_t dma_addr,
+                              unsigned long attrs);
 
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                            int nents, int mapped_ents, int direction);
+                            int nents, int mapped_ents, int direction,
+                            unsigned long attrs);
 
 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, int dir);
 
 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                    dma_addr_t dma_addr, void *virt);
+                                    dma_addr_t dma_addr, void *virt,
+                                    unsigned long attrs);
 
 extern void debug_dma_free_coherent(struct device *dev, size_t size,
                                    void *virt, dma_addr_t addr);
 
 extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                   size_t size, int direction,
-                                  dma_addr_t dma_addr);
+                                  dma_addr_t dma_addr,
+                                  unsigned long attrs);
 
 extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                                     size_t size, int direction);
@@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
-                                     int direction, dma_addr_t dma_addr)
+                                     int direction, dma_addr_t dma_addr,
+                                     unsigned long attrs)
 {
 }
 
@@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 
 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                                   int nents, int mapped_ents, int direction)
+                                   int nents, int mapped_ents, int direction,
+                                   unsigned long attrs)
 {
 }
 
@@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
 }
 
 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                           dma_addr_t dma_addr, void *virt)
+                                           dma_addr_t dma_addr, void *virt,
+                                           unsigned long attrs)
 {
 }
 
@@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 
 static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                          size_t size, int direction,
-                                         dma_addr_t dma_addr)
+                                         dma_addr_t dma_addr,
+                                         unsigned long attrs)
 {
 }
 
index 06fec55..8349a9f 100644 (file)
@@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
-       debug_dma_map_page(dev, page, offset, size, dir, addr);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
 
        return addr;
 }
@@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
 
        if (ents > 0)
-               debug_dma_map_sg(dev, sg, nents, ents, dir);
+               debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
        else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                              ents != -EIO))
                return -EIO;
@@ -249,12 +249,12 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
  * Returns 0 on success or a negative error code on error. The following
  * error codes are supported with the given meaning:
  *
- *   -EINVAL - An invalid argument, unaligned access or other error
- *            in usage. Will not succeed if retried.
- *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
- *            complete the mapping. Should succeed if retried later.
- *   -EIO    - Legacy error code with an unknown meaning. eg. this is
- *            returned if a lower level call returned DMA_MAPPING_ERROR.
+ *   -EINVAL   An invalid argument, unaligned access or other error
+ *             in usage. Will not succeed if retried.
+ *   -ENOMEM   Insufficient resources (like memory or IOVA space) to
+ *             complete the mapping. Should succeed if retried later.
+ *   -EIO      Legacy error code with an unknown meaning. eg. this is
+ *             returned if a lower level call returned DMA_MAPPING_ERROR.
  */
 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                    enum dma_data_direction dir, unsigned long attrs)
@@ -305,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 
-       debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+       debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
        return addr;
 }
 EXPORT_SYMBOL(dma_map_resource);
@@ -510,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        else
                return NULL;
 
-       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
 }
 EXPORT_SYMBOL(dma_alloc_attrs);
@@ -566,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
        struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
 
        if (page)
-               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+               debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
        return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -644,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 
        if (sgt) {
                sgt->nents = 1;
-               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+               debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
        }
        return sgt;
 }
index 952741f..487bf4f 100644 (file)
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
         */
        rcu_read_lock();
        ucounts = task_ucounts(t);
-       sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-       switch (sigpending) {
-       case 1:
-               if (likely(get_ucounts(ucounts)))
-                       break;
-               fallthrough;
-       case LONG_MAX:
-               /*
-                * we need to decrease the ucount in the userns tree on any
-                * failure to avoid counts leaking.
-                */
-               dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-               rcu_read_unlock();
-               return NULL;
-       }
+       sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        rcu_read_unlock();
+       if (!sigpending)
+               return NULL;
 
        if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
                q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
        }
 
        if (unlikely(q == NULL)) {
-               if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
-                       put_ucounts(ucounts);
+               dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
-       if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
-               put_ucounts(q->ucounts);
+       if (q->ucounts) {
+               dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
                q->ucounts = NULL;
        }
        kmem_cache_free(sigqueue_cachep, q);
index 7efbc8a..635fbdc 100644 (file)
@@ -6977,7 +6977,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *op;
        int bit;
 
-       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
        if (bit < 0)
                return;
 
@@ -7052,7 +7052,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 {
        int bit;
 
-       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
        if (bit < 0)
                return;
 
index bb51849..eb03f3c 100644 (file)
@@ -284,6 +284,55 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
        return (new == 0);
 }
 
+static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
+                               struct ucounts *last, enum ucount_type type)
+{
+       struct ucounts *iter, *next;
+       for (iter = ucounts; iter != last; iter = next) {
+               long dec = atomic_long_add_return(-1, &iter->ucount[type]);
+               WARN_ON_ONCE(dec < 0);
+               next = iter->ns->ucounts;
+               if (dec == 0)
+                       put_ucounts(iter);
+       }
+}
+
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+       do_dec_rlimit_put_ucounts(ucounts, NULL, type);
+}
+
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+       /* Caller must hold a reference to ucounts */
+       struct ucounts *iter;
+       long dec, ret = 0;
+
+       for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+               long max = READ_ONCE(iter->ns->ucount_max[type]);
+               long new = atomic_long_add_return(1, &iter->ucount[type]);
+               if (new < 0 || new > max)
+                       goto unwind;
+               if (iter == ucounts)
+                       ret = new;
+               /*
+                * Grab an extra ucount reference for the caller when
+                * the rlimit count was previously 0.
+                */
+               if (new != 1)
+                       continue;
+               if (!get_ucounts(iter))
+                       goto dec_unwind;
+       }
+       return ret;
+dec_unwind:
+       dec = atomic_long_add_return(-1, &iter->ucount[type]);
+       WARN_ON_ONCE(dec < 0);
+unwind:
+       do_dec_rlimit_put_ucounts(ucounts, iter, type);
+       return 0;
+}
+
 bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
 {
        struct ucounts *iter;
index 5e9ef0f..92192cb 100644 (file)
@@ -2700,12 +2700,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping) {
                        int nr = thp_nr_pages(head);
 
-                       if (PageSwapBacked(head))
+                       if (PageSwapBacked(head)) {
                                __mod_lruvec_page_state(head, NR_SHMEM_THPS,
                                                        -nr);
-                       else
+                       } else {
                                __mod_lruvec_page_state(head, NR_FILE_THPS,
                                                        -nr);
+                               filemap_nr_thps_dec(mapping);
+                       }
                }
 
                __split_huge_page(page, list, end);
index 5c3503c..5096500 100644 (file)
@@ -932,16 +932,14 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  * covered by the memory map. The struct page representing NOMAP memory
  * frames in the memory map will be PageReserved()
  *
+ * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
+ * memblock, the caller must inform kmemleak to ignore that memory
+ *
  * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
 {
-       int ret = memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
-
-       if (!ret)
-               kmemleak_free_part_phys(base, size);
-
-       return ret;
+       return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
 }
 
 /**
@@ -1692,7 +1690,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
        if (!size)
                return;
 
-       if (memblock.memory.cnt <= 1) {
+       if (!memblock_memory->total_size) {
                pr_warn("%s: No memory registered yet\n", __func__);
                return;
        }
index 1592b08..d12e060 100644 (file)
@@ -856,16 +856,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                goto out;
        }
 
-       if (flags & MPOL_F_NUMA_BALANCING) {
-               if (new && new->mode == MPOL_BIND) {
-                       new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
-               } else {
-                       ret = -EINVAL;
-                       mpol_put(new);
-                       goto out;
-               }
-       }
-
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                mpol_put(new);
@@ -1458,7 +1448,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
                return -EINVAL;
        if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
-
+       if (*flags & MPOL_F_NUMA_BALANCING) {
+               if (*mode != MPOL_BIND)
+                       return -EINVAL;
+               *flags |= (MPOL_F_MOF | MPOL_F_MORON);
+       }
        return 0;
 }
 
index a6a7743..1852d78 100644 (file)
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 EXPORT_SYMBOL(migrate_vma_finalize);
 #endif /* CONFIG_DEVICE_PRIVATE */
 
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
 /* Disable reclaim-based migration. */
 static void __disable_all_migrate_targets(void)
 {
@@ -3209,25 +3209,6 @@ static void set_migration_target_nodes(void)
 }
 
 /*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs.  That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
-       set_migration_target_nodes();
-       return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
-       set_migration_target_nodes();
-       return 0;
-}
-
-/*
  * This leaves migrate-on-reclaim transiently disabled between
  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
  * whether reclaim-based migration is enabled or not, which
@@ -3239,8 +3220,18 @@ static int migration_offline_cpu(unsigned int cpu)
  * set_migration_target_nodes().
  */
 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
-                                                unsigned long action, void *arg)
+                                                unsigned long action, void *_arg)
 {
+       struct memory_notify *arg = _arg;
+
+       /*
+        * Only update the node migration order when a node is
+        * changing status, like online->offline.  This avoids
+        * the overhead of synchronize_rcu() in most cases.
+        */
+       if (arg->status_change_nid < 0)
+               return notifier_from_errno(0);
+
        switch (action) {
        case MEM_GOING_OFFLINE:
                /*
@@ -3274,13 +3265,31 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
        return notifier_from_errno(0);
 }
 
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs.  That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+       set_migration_target_nodes();
+       return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+       set_migration_target_nodes();
+       return 0;
+}
+
 static int __init migrate_on_reclaim_init(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "migrate on reclaim",
-                               migration_online_cpu,
-                               migration_offline_cpu);
+       ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
+                                       NULL, migration_offline_cpu);
        /*
         * In the unlikely case that this fails, the automatic
         * migration targets may become suboptimal for nodes
@@ -3288,9 +3297,12 @@ static int __init migrate_on_reclaim_init(void)
         * rare case, do not bother trying to do anything special.
         */
        WARN_ON(ret < 0);
+       ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
+                               migration_online_cpu, NULL);
+       WARN_ON(ret < 0);
 
        hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
        return 0;
 }
 late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
index dfb9165..2a52fd9 100644 (file)
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
        total_usage += table_size;
        return 0;
 }
-#ifdef CONFIG_MEMORY_HOTPLUG
+
 static void free_page_ext(void *addr)
 {
        if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
        return notifier_from_errno(ret);
 }
 
-#endif
-
 void __init page_ext_init(void)
 {
        unsigned long pfn;
index d0f7256..874b3f8 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
        return 0;
 }
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
 /*
  * Drains freelist for a node on each slab cache, used for memory hot-remove.
  * Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 out:
        return notifier_from_errno(ret);
 }
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
 
 /*
  * swap the static kmem_cache_node with kmalloced memory
index 3d2025f..d8f7734 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1701,7 +1701,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-                                          void **head, void **tail)
+                                          void **head, void **tail,
+                                          int *cnt)
 {
 
        void *object;
@@ -1728,6 +1729,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                        *head = object;
                        if (!*tail)
                                *tail = object;
+               } else {
+                       /*
+                        * Adjust the reconstructed freelist depth
+                        * accordingly if object's reuse is delayed.
+                        */
+                       --(*cnt);
                }
        } while (object != old_tail);
 
@@ -3413,7 +3420,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       memcg_slab_free_hook(s, &head, 1);
+       /* memcg_slab_free_hook() is already called for bulk free. */
+       if (!tail)
+               memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the currently cpus per cpu slab.
@@ -3480,7 +3489,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects, whose reuse must be delayed.
         */
-       if (slab_free_freelist_hook(s, &head, &tail))
+       if (slab_free_freelist_hook(s, &head, &tail, &cnt))
                do_slab_free(s, page, head, tail, cnt, addr);
 }
 
@@ -4203,8 +4212,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
        if (alloc_kmem_cache_cpus(s))
                return 0;
 
-       free_kmem_cache_nodes(s);
 error:
+       __kmem_cache_release(s);
        return -EINVAL;
 }
 
@@ -4880,13 +4889,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
                return 0;
 
        err = sysfs_slab_add(s);
-       if (err)
+       if (err) {
                __kmem_cache_release(s);
+               return err;
+       }
 
        if (s->flags & SLAB_STORE_USER)
                debugfs_slab_add(s);
 
-       return err;
+       return 0;
 }
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
@@ -6108,9 +6119,14 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
        struct kmem_cache *s = file_inode(filep)->i_private;
        unsigned long *obj_map;
 
+       if (!t)
+               return -ENOMEM;
+
        obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
-       if (!obj_map)
+       if (!obj_map) {
+               seq_release_private(inode, filep);
                return -ENOMEM;
+       }
 
        if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
                alloc = TRACK_ALLOC;
@@ -6119,6 +6135,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 
        if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
                bitmap_free(obj_map);
+               seq_release_private(inode, filep);
                return -ENOMEM;
        }
 
index e8136db..37ca764 100644 (file)
@@ -1125,9 +1125,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge_mcast *brm
 
 static inline unsigned long br_multicast_gmi(const struct net_bridge_mcast *brmctx)
 {
-       /* use the RFC default of 2 for QRV */
-       return 2 * brmctx->multicast_query_interval +
-              brmctx->multicast_query_response_interval;
+       return brmctx->multicast_membership_interval;
 }
 
 static inline bool
index 83d1798..ba045f3 100644 (file)
@@ -926,7 +926,9 @@ static int translate_table(struct net *net, const char *name,
                        return -ENOMEM;
                for_each_possible_cpu(i) {
                        newinfo->chainstack[i] =
-                         vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
+                         vmalloc_node(array_size(udc_cnt,
+                                         sizeof(*(newinfo->chainstack[0]))),
+                                      cpu_to_node(i));
                        if (!newinfo->chainstack[i]) {
                                while (i)
                                        vfree(newinfo->chainstack[--i]);
index caaa532..df6968b 100644 (file)
@@ -121,7 +121,7 @@ enum {
 struct tpcon {
        int idx;
        int len;
-       u8 state;
+       u32 state;
        u8 bs;
        u8 sn;
        u8 ll_dl;
@@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct isotp_sock *so = isotp_sk(sk);
+       u32 old_state = so->tx.state;
        struct sk_buff *skb;
        struct net_device *dev;
        struct canfd_frame *cf;
@@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                return -EADDRNOTAVAIL;
 
        /* we do not support multiple buffers - for now */
-       if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
-               if (msg->msg_flags & MSG_DONTWAIT)
-                       return -EAGAIN;
+       if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
+           wq_has_sleeper(&so->wait)) {
+               if (msg->msg_flags & MSG_DONTWAIT) {
+                       err = -EAGAIN;
+                       goto err_out;
+               }
 
                /* wait for complete transmission of current pdu */
-               wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               if (err)
+                       goto err_out;
        }
 
-       if (!size || size > MAX_MSG_LENGTH)
-               return -EINVAL;
+       if (!size || size > MAX_MSG_LENGTH) {
+               err = -EINVAL;
+               goto err_out;
+       }
 
        /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
        off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
 
        /* does the given data fit into a single frame for SF_BROADCAST? */
        if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
-           (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
-               return -EINVAL;
+           (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
+               err = -EINVAL;
+               goto err_out;
+       }
 
        err = memcpy_from_msg(so->tx.buf, msg, size);
        if (err < 0)
-               return err;
+               goto err_out;
 
        dev = dev_get_by_index(sock_net(sk), so->ifindex);
-       if (!dev)
-               return -ENXIO;
+       if (!dev) {
+               err = -ENXIO;
+               goto err_out;
+       }
 
        skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb) {
                dev_put(dev);
-               return err;
+               goto err_out;
        }
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
        can_skb_prv(skb)->skbcnt = 0;
 
-       so->tx.state = ISOTP_SENDING;
        so->tx.len = size;
        so->tx.idx = 0;
 
@@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err) {
                pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
                               __func__, ERR_PTR(err));
-               return err;
+               goto err_out;
        }
 
        if (wait_tx_done) {
                /* wait for complete transmission of current pdu */
                wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+
+               if (sk->sk_err)
+                       return -sk->sk_err;
        }
 
        return size;
+
+err_out:
+       so->tx.state = old_state;
+       if (so->tx.state == ISOTP_IDLE)
+               wake_up_interruptible(&so->wait);
+
+       return err;
 }
 
 static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
index f6df208..16af1a7 100644 (file)
@@ -330,6 +330,7 @@ int j1939_session_activate(struct j1939_session *session);
 void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
 void j1939_session_timers_cancel(struct j1939_session *session);
 
+#define J1939_MIN_TP_PACKET_SIZE 9
 #define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
 #define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
 
index 08c8606..9bc55ec 100644 (file)
@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
        struct j1939_priv *priv, *priv_new;
        int ret;
 
-       priv = j1939_priv_get_by_ndev(ndev);
+       spin_lock(&j1939_netdev_lock);
+       priv = j1939_priv_get_by_ndev_locked(ndev);
        if (priv) {
                kref_get(&priv->rx_kref);
+               spin_unlock(&j1939_netdev_lock);
                return priv;
        }
+       spin_unlock(&j1939_netdev_lock);
 
        priv = j1939_priv_create(ndev);
        if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
                /* Someone was faster than us, use their priv and roll
                 * back our's.
                 */
+               kref_get(&priv_new->rx_kref);
                spin_unlock(&j1939_netdev_lock);
                dev_put(ndev);
                kfree(priv);
-               kref_get(&priv_new->rx_kref);
                return priv_new;
        }
        j1939_priv_set(ndev, priv);
index bb5c4b8..6c0a0eb 100644 (file)
@@ -1237,12 +1237,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
                session->err = -ETIME;
                j1939_session_deactivate(session);
        } else {
-               netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
-                            __func__, session);
-
                j1939_session_list_lock(session->priv);
                if (session->state >= J1939_SESSION_ACTIVE &&
                    session->state < J1939_SESSION_ACTIVE_MAX) {
+                       netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
+                                    __func__, session);
                        j1939_session_get(session);
                        hrtimer_start(&session->rxtimer,
                                      ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@@ -1609,6 +1608,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
                        abort = J1939_XTP_ABORT_FAULT;
                else if (len > priv->tp_max_packet_size)
                        abort = J1939_XTP_ABORT_RESOURCE;
+               else if (len < J1939_MIN_TP_PACKET_SIZE)
+                       abort = J1939_XTP_ABORT_FAULT;
        }
 
        if (abort != J1939_XTP_NO_ABORT) {
@@ -1789,6 +1790,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
 static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                                 struct sk_buff *skb)
 {
+       enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *skcb, *se_skcb;
        struct sk_buff *se_skb = NULL;
@@ -1803,9 +1805,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 
        skcb = j1939_skb_to_cb(skb);
        dat = skb->data;
-       if (skb->len <= 1)
+       if (skb->len != 8) {
                /* makes no sense */
+               abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
                goto out_session_cancel;
+       }
 
        switch (session->last_cmd) {
        case 0xff:
@@ -1904,7 +1908,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
  out_session_cancel:
        kfree_skb(se_skb);
        j1939_session_timers_cancel(session);
-       j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
+       j1939_session_cancel(session, abort);
        j1939_session_put(session);
 }
 
index da18094..e9911b1 100644 (file)
@@ -1374,12 +1374,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
 
        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
-               if (err)
+               if (err) {
+                       of_node_put(port);
                        goto out_put_node;
+               }
 
                if (reg >= ds->num_ports) {
                        dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
                                port, reg, ds->num_ports);
+                       of_node_put(port);
                        err = -EINVAL;
                        goto out_put_node;
                }
@@ -1387,8 +1390,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
                dp = dsa_to_port(ds, reg);
 
                err = dsa_port_parse_of(dp, port);
-               if (err)
+               if (err) {
+                       of_node_put(port);
                        goto out_put_node;
+               }
        }
 
 out_put_node:
index 2e62e0d..5b8ce65 100644 (file)
@@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
 EXPORT_SYMBOL(tcp_md5_needed);
 
+static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
+{
+       if (!old)
+               return true;
+
+       /* l3index always overrides non-l3index */
+       if (old->l3index && new->l3index == 0)
+               return false;
+       if (old->l3index == 0 && new->l3index)
+               return true;
+
+       return old->prefixlen < new->prefixlen;
+}
+
 /* Find the Key structure for an address.  */
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                           const union tcp_md5_addr *addr,
@@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
                        continue;
                if (family == AF_INET) {
                        mask = inet_make_mask(key->prefixlen);
@@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                        match = false;
                }
 
-               if (match && (!best_match ||
-                             key->prefixlen > best_match->prefixlen))
+               if (match && better_md5_match(best_match, key))
                        best_match = key;
        }
        return best_match;
@@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                                      const union tcp_md5_addr *addr,
                                                      int family, u8 prefixlen,
-                                                     int l3index)
+                                                     int l3index, u8 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
@@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
+                       continue;
+               if (key->l3index != l3index)
                        continue;
                if (!memcmp(&key->addr, addr, size) &&
                    key->prefixlen == prefixlen)
@@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
 
 /* This can be called on a newly created socket, from other files */
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp)
 {
        /* Add Key to the list */
@@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (key) {
                /* Pre-existing entry - just update that one.
                 * Note that the key might be used concurrently.
@@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        key->family = family;
        key->prefixlen = prefixlen;
        key->l3index = l3index;
+       key->flags = flags;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
@@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 EXPORT_SYMBOL(tcp_md5_do_add);
 
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
-                  u8 prefixlen, int l3index)
+                  u8 prefixlen, int l3index, u8 flags)
 {
        struct tcp_md5sig_key *key;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
@@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        const union tcp_md5_addr *addr;
        u8 prefixlen = 32;
        int l3index = 0;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        if (sin->sin_family != AF_INET)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
                        return -EINVAL;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
 
        if (!cmd.tcpm_keylen)
-               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
+               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;
 
-       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
+       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
-               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
+               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
                               key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
index 12f985f..2f044a4 100644 (file)
@@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 
 int ip6_forward(struct sk_buff *skb)
 {
-       struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
+       struct inet6_dev *idev;
        u32 mtu;
 
+       idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;
 
index 733c83d..4ad8b20 100644 (file)
@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline bool
 segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
 {
-       bool r;
-       pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
-                invert ? '!' : ' ', min, id, max);
-       r = (id >= min && id <= max) ^ invert;
-       pr_debug(" result %s\n", r ? "PASS" : "FAILED");
-       return r;
+       return (id >= min && id <= max) ^ invert;
 }
 
 static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                return false;
        }
 
-       pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
-       pr_debug("TYPE %04X ", rh->type);
-       pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
-       pr_debug("IPv6 RT segsleft %02X ",
-                segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                               rh->segments_left,
-                               !!(rtinfo->invflags & IP6T_RT_INV_SGS)));
-       pr_debug("type %02X %02X %02X ",
-                rtinfo->rt_type, rh->type,
-                (!(rtinfo->flags & IP6T_RT_TYP) ||
-                 ((rtinfo->rt_type == rh->type) ^
-                  !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
-       pr_debug("len %02X %04X %02X ",
-                rtinfo->hdrlen, hdrlen,
-                !(rtinfo->flags & IP6T_RT_LEN) ||
-                 ((rtinfo->hdrlen == hdrlen) ^
-                  !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
-       pr_debug("res %02X %02X %02X ",
-                rtinfo->flags & IP6T_RT_RES,
-                ((const struct rt0_hdr *)rh)->reserved,
-                !((rtinfo->flags & IP6T_RT_RES) &&
-                  (((const struct rt0_hdr *)rh)->reserved)));
-
        ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
                              rh->segments_left,
                              !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                                       reserved),
                                        sizeof(_reserved),
                                        &_reserved);
+               if (!rp) {
+                       par->hotdrop = true;
+                       return false;
+               }
 
                ret = (*rp == 0);
        }
 
-       pr_debug("#%d ", rtinfo->addrnr);
        if (!(rtinfo->flags & IP6T_RT_FST)) {
                return ret;
        } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
-               pr_debug("Not strict ");
                if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
-                       pr_debug("There isn't enough space\n");
                        return false;
                } else {
                        unsigned int i = 0;
 
-                       pr_debug("#%d ", rtinfo->addrnr);
                        for (temp = 0;
                             temp < (unsigned int)((hdrlen - 8) / 16);
                             temp++) {
@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                        return false;
                                }
 
-                               if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
-                                       pr_debug("i=%d temp=%d;\n", i, temp);
+                               if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
                                        i++;
-                               }
                                if (i == rtinfo->addrnr)
                                        break;
                        }
-                       pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
                        if (i == rtinfo->addrnr)
                                return ret;
                        else
                                return false;
                }
        } else {
-               pr_debug("Strict ");
                if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
-                       pr_debug("There isn't enough space\n");
                        return false;
                } else {
-                       pr_debug("#%d ", rtinfo->addrnr);
                        for (temp = 0; temp < rtinfo->addrnr; temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
                                        break;
                        }
-                       pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
                        if (temp == rtinfo->addrnr &&
                            temp == (unsigned int)((hdrlen - 8) / 16))
                                return ret;
index 0ce52d4..b03dd02 100644 (file)
@@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        int l3index = 0;
        u8 prefixlen;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET, prefixlen,
-                                             l3index);
+                                             l3index, flags);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                                     AF_INET6, prefixlen, l3index);
+                                     AF_INET6, prefixlen, l3index, flags);
        }
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
@@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 
        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
-                                     AF_INET, prefixlen, l3index,
+                                     AF_INET, prefixlen, l3index, flags,
                                      cmd.tcpm_key, cmd.tcpm_keylen,
                                      GFP_KERNEL);
 
        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                             AF_INET6, prefixlen, l3index,
+                             AF_INET6, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
-                              AF_INET6, 128, l3index, key->key, key->keylen,
+                              AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
 #endif
index 5439526..92a7478 100644 (file)
@@ -109,7 +109,7 @@ config NF_CONNTRACK_MARK
 config NF_CONNTRACK_SECMARK
        bool  'Connection tracking security mark support'
        depends on NETWORK_SECMARK
-       default m if NETFILTER_ADVANCED=n
+       default y if NETFILTER_ADVANCED=n
        help
          This option enables security markings to be applied to
          connections.  Typically they are copied to connections from
index c250970..29ec3ef 100644 (file)
@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
        tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
        tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
        tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+#ifdef CONFIG_IP_VS_DEBUG
+       /* Global sysctls must be read-only in non-init netns */
+       if (!net_eq(net, &init_net))
+               tbl[idx++].mode = 0444;
+#endif
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
        if (ipvs->sysctl_hdr == NULL) {
index 5b02408..3ced0eb 100644 (file)
@@ -342,12 +342,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
                return;
        }
 
-       /* UNREGISTER events are also happening on netns exit.
-        *
-        * Although nf_tables core releases all tables/chains, only this event
-        * handler provides guarantee that hook->ops.dev is still accessible,
-        * so we cannot skip exiting net namespaces.
-        */
        __nft_release_basechain(ctx);
 }
 
@@ -366,6 +360,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
            event != NETDEV_CHANGENAME)
                return NOTIFY_DONE;
 
+       if (!check_net(ctx.net))
+               return NOTIFY_DONE;
+
        nft_net = nft_pernet(ctx.net);
        mutex_lock(&nft_net->commit_mutex);
        list_for_each_entry(table, &nft_net->tables, list) {
index 7b2f359..2f7cf5e 100644 (file)
@@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 {
        int ret;
 
-       info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+       info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
        if (!info->timer) {
                ret = -ENOMEM;
                goto out;
index ad9df0c..90866ae 100644 (file)
@@ -960,6 +960,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tmpl = p->tmpl;
 
        tcf_lastuse_update(&c->tcf_tm);
+       tcf_action_update_bstats(&c->common, skb);
 
        if (clear) {
                qdisc_skb_cb(skb)->post_ct = false;
@@ -1049,7 +1050,6 @@ out_push:
 
        qdisc_skb_cb(skb)->post_ct = true;
 out_clear:
-       tcf_action_update_bstats(&c->common, skb);
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
        return retval;
index e3d79a7..b5d5333 100644 (file)
@@ -918,6 +918,13 @@ void key_change_session_keyring(struct callback_head *twork)
                return;
        }
 
+       /* If get_ucounts fails more bits are needed in the refcount */
+       if (unlikely(!get_ucounts(old->ucounts))) {
+               WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
+               put_cred(new);
+               return;
+       }
+
        new->  uid      = old->  uid;
        new-> euid      = old-> euid;
        new-> suid      = old-> suid;
@@ -927,6 +934,7 @@ void key_change_session_keyring(struct callback_head *twork)
        new-> sgid      = old-> sgid;
        new->fsgid      = old->fsgid;
        new->user       = get_uid(old->user);
+       new->ucounts    = old->ucounts;
        new->user_ns    = get_user_ns(old->user_ns);
        new->group_info = get_group_info(old->group_info);
 
index 22d27b1..965b096 100644 (file)
@@ -2535,6 +2535,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -6405,6 +6406,44 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
        }
 }
 
+/* GPIO1 = amplifier on/off
+ * GPIO3 = mic mute LED
+ */
+static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
+                                         const struct hda_fixup *fix, int action)
+{
+       static const hda_nid_t conn[] = { 0x02 };
+
+       struct alc_spec *spec = codec->spec;
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x14, 0x90170110 },  /* front/high speakers */
+               { 0x17, 0x90170130 },  /* back/bass speakers */
+               { }
+       };
+
+       /* enable the mic-mute LED */
+       alc_fixup_hp_gpio_led(codec, action, 0x00, 0x04);
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->micmute_led_polarity = 1;
+               /* needed for amp of back speakers */
+               spec->gpio_mask |= 0x01;
+               spec->gpio_dir |= 0x01;
+               snd_hda_apply_pincfgs(codec, pincfgs);
+               /* share DAC to have unified volume control */
+               snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
+               snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               /* need to toggle GPIO to enable the amp of back speakers */
+               alc_update_gpio_data(codec, 0x01, true);
+               msleep(100);
+               alc_update_gpio_data(codec, 0x01, false);
+               break;
+       }
+}
+
 static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
                                          const struct hda_fixup *fix, int action)
 {
@@ -6557,6 +6596,7 @@ enum {
        ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
        ALC280_FIXUP_HP_9480M,
        ALC245_FIXUP_HP_X360_AMP,
+       ALC285_FIXUP_HP_SPECTRE_X360_EB1,
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC288_FIXUP_DELL_XPS_13,
@@ -8250,6 +8290,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_spectre_x360,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360_EB1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_spectre_x360_eb1
+       },
        [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_ideapad_s740_coef,
@@ -8584,6 +8628,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+       SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@@ -9005,6 +9051,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
        {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
        {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+       {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
        {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
        {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
        {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
index 82ee233..216cea0 100644 (file)
@@ -1583,6 +1583,7 @@ config SND_SOC_WCD938X_SDW
        tristate "WCD9380/WCD9385 Codec - SDW"
        select SND_SOC_WCD938X
        select SND_SOC_WCD_MBHC
+       select REGMAP_IRQ
        depends on SOUNDWIRE
        select REGMAP_SOUNDWIRE
        help
index fb1e4c3..9a463ab 100644 (file)
@@ -922,7 +922,6 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
        struct snd_soc_component *component = dai->component;
        struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
        unsigned int regval;
-       u8 fullScaleVol;
        int ret;
 
        if (mute) {
@@ -993,20 +992,11 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                cs42l42->stream_use |= 1 << stream;
 
                if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-                       /* Read the headphone load */
-                       regval = snd_soc_component_read(component, CS42L42_LOAD_DET_RCSTAT);
-                       if (((regval & CS42L42_RLA_STAT_MASK) >> CS42L42_RLA_STAT_SHIFT) ==
-                           CS42L42_RLA_STAT_15_OHM) {
-                               fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
-                       } else {
-                               fullScaleVol = 0;
-                       }
-
-                       /* Un-mute the headphone, set the full scale volume flag */
+                       /* Un-mute the headphone */
                        snd_soc_component_update_bits(component, CS42L42_HP_CTL,
                                                      CS42L42_HP_ANA_AMUTE_MASK |
-                                                     CS42L42_HP_ANA_BMUTE_MASK |
-                                                     CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
+                                                     CS42L42_HP_ANA_BMUTE_MASK,
+                                                     0);
                }
        }
 
index 7d3e54d..29d05e3 100644 (file)
@@ -305,12 +305,19 @@ static int cs4341_spi_probe(struct spi_device *spi)
        return cs4341_probe(&spi->dev);
 }
 
+static const struct spi_device_id cs4341_spi_ids[] = {
+       { "cs4341a" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, cs4341_spi_ids);
+
 static struct spi_driver cs4341_spi_driver = {
        .driver = {
                .name = "cs4341-spi",
                .of_match_table = of_match_ptr(cs4341_dt_ids),
        },
        .probe = cs4341_spi_probe,
+       .id_table = cs4341_spi_ids,
 };
 #endif
 
index db88be4..f946ef6 100644 (file)
@@ -867,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       snd_soc_dapm_enable_pin(dapm, "MICBIAS");
-       snd_soc_dapm_enable_pin(dapm, "SAR");
+       snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+       snd_soc_dapm_force_enable_pin(dapm, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
index 0a54292..ebf63ea 100644 (file)
@@ -36,6 +36,7 @@ static const struct of_device_id pcm179x_of_match[] = {
 MODULE_DEVICE_TABLE(of, pcm179x_of_match);
 
 static const struct spi_device_id pcm179x_spi_ids[] = {
+       { "pcm1792a", 0 },
        { "pcm179x", 0 },
        { },
 };
index 4dc844f..60dee41 100644 (file)
@@ -116,6 +116,8 @@ static const struct reg_default pcm512x_reg_defaults[] = {
        { PCM512x_FS_SPEED_MODE,     0x00 },
        { PCM512x_IDAC_1,            0x01 },
        { PCM512x_IDAC_2,            0x00 },
+       { PCM512x_I2S_1,             0x02 },
+       { PCM512x_I2S_2,             0x00 },
 };
 
 static bool pcm512x_readable(struct device *dev, unsigned int reg)
index f0daf8d..52de7d1 100644 (file)
@@ -4144,10 +4144,10 @@ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
 {
        struct wcd938x_priv *wcd = dev_get_drvdata(comp->dev);
 
-       if (!jack)
+       if (jack)
                return wcd_mbhc_start(wcd->wcd_mbhc, &wcd->mbhc_cfg, jack);
-
-       wcd_mbhc_stop(wcd->wcd_mbhc);
+       else
+               wcd_mbhc_stop(wcd->wcd_mbhc);
 
        return 0;
 }
index 9e621a2..499604f 100644 (file)
@@ -742,9 +742,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
        int i, j, k;
        int ret;
 
-       if (!(iface1 & (1<<6))) {
-               dev_dbg(component->dev,
-                       "Codec is slave mode, no need to configure clock\n");
+       /*
+        * In slave mode the clocking should still be configured, so
+        * this if statement ought to be removed. However, some
+        * platforms may not work if sysclk is left unconfigured; to
+        * avoid that compatibility issue, the '!wm8960->sysclk'
+        * condition is added to this if statement.
+        */
+       if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
+               dev_warn(component->dev,
+                        "slave mode, but proceeding with no clock configuration\n");
                return 0;
        }
 
index 7ba2fd1..d0556c7 100644 (file)
@@ -487,8 +487,9 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       /* clear DPATH RESET */
+       /* set DPATH RESET */
        m_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
+       v_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
        ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, m_ctl, v_ctl);
        if (ret < 0) {
                dev_err(dai->dev, "Error while setting EXT_CTRL: %d\n", ret);
@@ -590,10 +591,6 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
                val  |= FSL_XCVR_EXT_CTRL_CMDC_RESET(tx);
        }
 
-       /* set DPATH RESET */
-       mask |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
-       val  |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
-
        ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, mask, val);
        if (ret < 0) {
                dev_err(dai->dev, "Err setting DPATH RESET: %d\n", ret);
@@ -643,6 +640,16 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
                        dev_err(dai->dev, "Failed to enable DMA: %d\n", ret);
                        return ret;
                }
+
+               /* clear DPATH RESET */
+               ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+                                        FSL_XCVR_EXT_CTRL_DPTH_RESET(tx),
+                                        0);
+               if (ret < 0) {
+                       dev_err(dai->dev, "Failed to clear DPATH RESET: %d\n", ret);
+                       return ret;
+               }
+
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
index 055248f..4d313d0 100644 (file)
@@ -456,12 +456,12 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
 
 static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        static const char * const mic_name[] = { "in1", "in2" };
+       struct snd_soc_acpi_mach *mach = dev_get_platdata(dev);
        struct property_entry props[MAX_NO_PROPS] = {};
        struct byt_cht_es8316_private *priv;
        const struct dmi_system_id *dmi_id;
-       struct device *dev = &pdev->dev;
-       struct snd_soc_acpi_mach *mach;
        struct fwnode_handle *fwnode;
        const char *platform_name;
        struct acpi_device *adev;
@@ -476,7 +476,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       mach = dev->platform_data;
        /* fix index of codec dai */
        for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
                if (!strcmp(byt_cht_es8316_dais[i].codecs->name,
@@ -494,7 +493,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
                put_device(&adev->dev);
                byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
        } else {
-               dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
+               dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
                return -ENXIO;
        }
 
@@ -533,11 +532,8 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 
        /* get the clock */
        priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
-       if (IS_ERR(priv->mclk)) {
-               ret = PTR_ERR(priv->mclk);
-               dev_err(dev, "clk_get pmc_plt_clk_3 failed: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(priv->mclk))
+               return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");
 
        /* get speaker enable GPIO */
        codec_dev = acpi_get_first_physical_node(adev);
@@ -567,22 +563,13 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 
        devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
        priv->speaker_en_gpio =
-               gpiod_get_index(codec_dev, "speaker-enable", 0,
-                               /* see comment in byt_cht_es8316_resume */
-                               GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
-
+               gpiod_get_optional(codec_dev, "speaker-enable",
+                                  /* see comment in byt_cht_es8316_resume() */
+                                  GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(priv->speaker_en_gpio)) {
-               ret = PTR_ERR(priv->speaker_en_gpio);
-               switch (ret) {
-               case -ENOENT:
-                       priv->speaker_en_gpio = NULL;
-                       break;
-               default:
-                       dev_err(dev, "get speaker GPIO failed: %d\n", ret);
-                       fallthrough;
-               case -EPROBE_DEFER:
-                       goto err_put_codec;
-               }
+               ret = dev_err_probe(dev, PTR_ERR(priv->speaker_en_gpio),
+                                   "get speaker GPIO failed\n");
+               goto err_put_codec;
        }
 
        snprintf(components_string, sizeof(components_string),
@@ -597,7 +584,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
        byt_cht_es8316_card.long_name = long_name;
 #endif
 
-       sof_parent = snd_soc_acpi_sof_parent(&pdev->dev);
+       sof_parent = snd_soc_acpi_sof_parent(dev);
 
        /* set card and driver name */
        if (sof_parent) {
index c830e96..80ca260 100644 (file)
@@ -2599,6 +2599,7 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
        INIT_LIST_HEAD(&component->dai_list);
        INIT_LIST_HEAD(&component->dobj_list);
        INIT_LIST_HEAD(&component->card_list);
+       INIT_LIST_HEAD(&component->list);
        mutex_init(&component->io_mutex);
 
        component->name = fmt_single_name(dev, &component->id);
index 7b67f1e..59d0764 100644 (file)
@@ -2561,6 +2561,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
                                const char *pin, int status)
 {
        struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+       int ret = 0;
 
        dapm_assert_locked(dapm);
 
@@ -2573,13 +2574,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
                dapm_mark_dirty(w, "pin configuration");
                dapm_widget_invalidate_input_paths(w);
                dapm_widget_invalidate_output_paths(w);
+               ret = 1;
        }
 
        w->connected = status;
        if (status == 0)
                w->force = 0;
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -3583,14 +3585,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
        const char *pin = (const char *)kcontrol->private_value;
+       int ret;
 
        if (ucontrol->value.integer.value[0])
-               snd_soc_dapm_enable_pin(&card->dapm, pin);
+               ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
        else
-               snd_soc_dapm_disable_pin(&card->dapm, pin);
+               ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
 
        snd_soc_dapm_sync(&card->dapm);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
 
@@ -4023,7 +4026,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
 
        rtd->params_select = ucontrol->value.enumerated.item[0];
 
-       return 0;
+       return 1;
 }
 
 static void
index a2ce535..8e030b1 100644 (file)
@@ -1198,6 +1198,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                        cval->res = 1;
                }
                break;
+       case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
+               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+                       usb_audio_info(chip,
+                               "set resolution quirk: cval->res = 16\n");
+                       cval->res = 16;
+               }
+               break;
        }
 }
 
index de18fff..2af8c68 100644 (file)
@@ -4012,6 +4012,38 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /*
+        * Sennheiser GSP670
+        * Change order of interfaces loaded
+        */
+       USB_DEVICE(0x1395, 0x0300),
+       .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       // Communication
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       // Recording
+                       {
+                               .ifnum = 4,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       // Main
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 #undef USB_DEVICE_VENDOR_SPEC
 #undef USB_AUDIO_DEVICE
index 889c855..8929d9a 100644 (file)
@@ -1719,6 +1719,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
                 */
                fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
                break;
+       case USB_ID(0x1224, 0x2a25):  /* Jieli Technology USB PHY 2.0 */
+               /* mic works only when ep packet size is set to wMaxPacketSize */
+               fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
+               break;
+
        }
 }
 
@@ -1884,10 +1889,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+                  QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
                   QUIRK_FLAG_ALIGN_TRANSFER),
+       DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
 
        /* Vendor matches */
        VENDOR_FLG(0x045e, /* MS Lifecam */
index b0bf56c..5a5bd74 100755 (executable)
@@ -742,7 +742,7 @@ class DebugfsProvider(Provider):
         The fields are all available KVM debugfs files
 
         """
-        exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns']
+        exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns', 'halt_wait_ns']
         fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2]
                   if field not in exempt_list]
 
index 21b646d..86ab429 100644 (file)
@@ -43,3 +43,4 @@ CONFIG_NET_ACT_TUNNEL_KEY=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
+CONFIG_CRYPTO_SM4=y
index 13350cd..8e67a25 100755 (executable)
@@ -289,6 +289,12 @@ set_sysctl()
        run_cmd sysctl -q -w $*
 }
 
+# get sysctl values in NS-A
+get_sysctl()
+{
+       ${NSA_CMD} sysctl -n $*
+}
+
 ################################################################################
 # Setup for tests
 
@@ -1003,6 +1009,60 @@ ipv4_tcp_md5()
        run_cmd nettest -s -I ${NSA_DEV} -M ${MD5_PW} -m ${NS_NET}
        log_test $? 1 "MD5: VRF: Device must be a VRF - prefix"
 
+       test_ipv4_md5_vrf__vrf_server__no_bind_ifindex
+       test_ipv4_md5_vrf__global_server__bind_ifindex0
+}
+
+test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
+{
+       log_start
+       show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
+
+       log_start
+       show_hint "Binding both the socket and the key is not required but it works"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
+}
+
+test_ipv4_md5_vrf__global_server__bind_ifindex0()
+{
+       # This particular test needs tcp_l3mdev_accept=1 for Global server to accept VRF connections
+       local old_tcp_l3mdev_accept
+       old_tcp_l3mdev_accept=$(get_sysctl net.ipv4.tcp_l3mdev_accept)
+       set_sysctl net.ipv4.tcp_l3mdev_accept=1
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
+       log_start
+
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
+
+       # restore value
+       set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
 }
 
 ipv4_tcp_novrf()
index d97bd68..72ee644 100644 (file)
@@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
        gre_inner_v4_multipath.sh \
        gre_inner_v6_multipath.sh \
        gre_multipath.sh \
+       ip6_forward_instats_vrf.sh \
        ip6gre_inner_v4_multipath.sh \
        ip6gre_inner_v6_multipath.sh \
        ipip_flat_gre_key.sh \
index b802c14..e5e2fbe 100644 (file)
@@ -39,3 +39,5 @@ NETIF_CREATE=yes
 # Timeout (in seconds) before ping exits regardless of how many packets have
 # been sent or received
 PING_TIMEOUT=5
+# IPv6 traceroute utility name.
+TROUTE6=traceroute6
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
new file mode 100755 (executable)
index 0000000..9f5b3e2
--- /dev/null
@@ -0,0 +1,172 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test ipv6 stats on the incoming if when forwarding with VRF
+
+ALL_TESTS="
+       ipv6_ping
+       ipv6_in_too_big_err
+       ipv6_in_hdr_err
+       ipv6_in_addr_err
+       ipv6_in_discard
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 2001:1:1::2/64
+       ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+}
+
+h1_destroy()
+{
+       ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+       simple_if_fini $h1 2001:1:1::2/64
+}
+
+router_create()
+{
+       vrf_create router
+       __simple_if_init $rtr1 router 2001:1:1::1/64
+       __simple_if_init $rtr2 router 2001:1:2::1/64
+       mtu_set $rtr2 1280
+}
+
+router_destroy()
+{
+       mtu_restore $rtr2
+       __simple_if_fini $rtr2 2001:1:2::1/64
+       __simple_if_fini $rtr1 2001:1:1::1/64
+       vrf_destroy router
+}
+
+h2_create()
+{
+       simple_if_init $h2 2001:1:2::2/64
+       ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       mtu_set $h2 1280
+}
+
+h2_destroy()
+{
+       mtu_restore $h2
+       ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       simple_if_fini $h2 2001:1:2::2/64
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       rtr1=${NETIFS[p2]}
+
+       rtr2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+       h1_create
+       router_create
+       h2_create
+
+       forwarding_enable
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       forwarding_restore
+
+       h2_destroy
+       router_destroy
+       h1_destroy
+       vrf_cleanup
+}
+
+ipv6_in_too_big_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send too big packets
+       ip vrf exec $vrf_name \
+               $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InTooBigErrors"
+}
+
+ipv6_in_hdr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send packets with hop limit 1; traceroute6 is easiest since some
+       # ping6 implementations don't allow the hop limit to be specified
+       ip vrf exec $vrf_name \
+               $TROUTE6 2001:1:2::2 &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InHdrErrors"
+}
+
+ipv6_in_addr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Disable forwarding temporarily while sending the packet
+       sysctl -qw net.ipv6.conf.all.forwarding=0
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       sysctl -qw net.ipv6.conf.all.forwarding=1
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InAddrErrors"
+}
+
+ipv6_in_discard()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       local vrf_name=$(master_name_get $h1)
+
+       # Add a policy to discard
+       ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       ip xfrm policy del dst 2001:1:2::2/128 dir fwd
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InDiscards"
+}
+ipv6_ping()
+{
+       RET=0
+
+       ping6_test $h1 2001:1:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
index e7fc5c3..92087d4 100644 (file)
@@ -751,6 +751,14 @@ qdisc_parent_stats_get()
            | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
 }
 
+ipv6_stats_get()
+{
+       local dev=$1; shift
+       local stat=$1; shift
+
+       cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
+}
+
 humanize()
 {
        local speed=$1; shift
index bd62883..b599003 100644 (file)
@@ -28,6 +28,7 @@
 #include <unistd.h>
 #include <time.h>
 #include <errno.h>
+#include <getopt.h>
 
 #include <linux/xfrm.h>
 #include <linux/ipsec.h>
@@ -101,6 +102,8 @@ struct sock_args {
                struct sockaddr_in6 v6;
        } md5_prefix;
        unsigned int prefix_len;
+       /* 0: default, -1: force off, +1: force on */
+       int bind_key_ifindex;
 
        /* expected addresses and device index for connection */
        const char *expected_dev;
@@ -271,11 +274,14 @@ static int tcp_md5sig(int sd, void *addr, socklen_t alen, struct sock_args *args
        }
        memcpy(&md5sig.tcpm_addr, addr, alen);
 
-       if (args->ifindex) {
+       if ((args->ifindex && args->bind_key_ifindex >= 0) || args->bind_key_ifindex >= 1) {
                opt = TCP_MD5SIG_EXT;
                md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
 
                md5sig.tcpm_ifindex = args->ifindex;
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX set tcpm_ifindex=%d\n", md5sig.tcpm_ifindex);
+       } else {
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX off\n");
        }
 
        rc = setsockopt(sd, IPPROTO_TCP, opt, &md5sig, sizeof(md5sig));
@@ -1822,6 +1828,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
 }
 
 #define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
+#define OPT_FORCE_BIND_KEY_IFINDEX 1001
+#define OPT_NO_BIND_KEY_IFINDEX 1002
+
+static struct option long_opts[] = {
+       {"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
+       {"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
+       {0, 0, 0, 0}
+};
 
 static void print_usage(char *prog)
 {
@@ -1858,6 +1872,10 @@ static void print_usage(char *prog)
        "    -M password   use MD5 sum protection\n"
        "    -X password   MD5 password for client mode\n"
        "    -m prefix/len prefix and length to use for MD5 key\n"
+       "    --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
+       "    --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
+       "        (default: only if -I is passed)\n"
+       "\n"
        "    -g grp        multicast group (e.g., 239.1.1.1)\n"
        "    -i            interactive mode (default is echo and terminate)\n"
        "\n"
@@ -1893,7 +1911,7 @@ int main(int argc, char *argv[])
         * process input args
         */
 
-       while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) {
+       while ((rc = getopt_long(argc, argv, GETOPT_STR, long_opts, NULL)) != -1) {
                switch (rc) {
                case 'B':
                        both_mode = 1;
@@ -1966,6 +1984,12 @@ int main(int argc, char *argv[])
                case 'M':
                        args.password = optarg;
                        break;
+               case OPT_FORCE_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = 1;
+                       break;
+               case OPT_NO_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = -1;
+                       break;
                case 'X':
                        args.client_pw = optarg;
                        break;
index 427d948..d4ffebb 100755 (executable)
@@ -199,7 +199,6 @@ fi
 # test basic connectivity
 if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
   echo "ERROR: ns1 cannot reach ns2" 1>&2
-  bash
   exit 1
 fi
 
index d7e07f4..da1c1e4 100755 (executable)
@@ -741,6 +741,149 @@ EOF
        return $lret
 }
 
+# test port shadowing.
+# create two listening services, one on router (ns0), one
+# on client (ns2), which is masqueraded from ns1 point of view.
+# ns2 sends udp packet coming from service port to ns1, on a highport.
+# Later, if ns1 uses the same highport to connect to ns0:service, the packet
+# might be port-forwarded to ns2 instead.
+
+# second argument tells if we expect the 'fake-entry' to take effect
+# (CLIENT) or not (ROUTER).
+test_port_shadow()
+{
+       local test=$1
+       local expect=$2
+       local daddrc="10.0.1.99"
+       local daddrs="10.0.1.1"
+       local result=""
+       local logmsg=""
+
+       echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+       nc_r=$!
+
+       echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+       nc_c=$!
+
+       # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
+       echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+
+       # ns1 tries to connect to ns0:1405.  With default settings this should connect
+       # to client, it matches the conntrack entry created above.
+
+       result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+
+       if [ "$result" = "$expect" ] ;then
+               echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
+       else
+               echo "ERROR: portshadow test $test: got reply from \"$result\", not $expect as intended"
+               ret=1
+       fi
+
+       kill $nc_r $nc_c 2>/dev/null
+
+       # flush udp entries for next test round, if any
+       ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
+}
+
+# This prevents port shadow of router service via packet filter,
+# packets claiming to originate from service port from internal
+# network are dropped.
+test_port_shadow_filter()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family filter {
+       chain forward {
+               type filter hook forward priority 0; policy accept;
+               meta iif veth1 udp sport 1405 drop
+       }
+}
+EOF
+       test_port_shadow "port-filter" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family filter
+}
+
+# This prevents port shadow of router service via notrack.
+test_port_shadow_notrack()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family raw {
+       chain prerouting {
+               type filter hook prerouting priority -300; policy accept;
+               meta iif veth0 udp dport 1405 notrack
+               udp dport 1405 notrack
+       }
+       chain output {
+               type filter hook output priority -300; policy accept;
+               udp sport 1405 notrack
+       }
+}
+EOF
+       test_port_shadow "port-notrack" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family raw
+}
+
+# This prevents port shadow of router service via sport remap.
+test_port_shadow_pat()
+{
+       local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family pat {
+       chain postrouting {
+               type nat hook postrouting priority -1; policy accept;
+               meta iif veth1 udp sport <= 1405 masquerade to : 1406-65535 random
+       }
+}
+EOF
+       test_port_shadow "pat" "ROUTER"
+
+       ip netns exec "$ns0" nft delete table $family pat
+}
+
+test_port_shadowing()
+{
+       local family="ip"
+
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+       ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               meta oif veth0 masquerade
+       }
+}
+EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add $family masquerade hook"
+               return $ksft_skip
+       fi
+
+       # test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
+       test_port_shadow "default" "CLIENT"
+
+       # test packet filter based mitigation: prevent forwarding of
+       # packets claiming to come from the service port.
+       test_port_shadow_filter "$family"
+
+       # test conntrack based mitigation: connections going or coming
+       # from router:service bypass connection tracking.
+       test_port_shadow_notrack "$family"
+
+       # test nat based mitigation: forwarded packets coming from service port
+       # are masqueraded with random highport.
+       test_port_shadow_pat "$family"
+
+       ip netns exec "$ns0" nft delete table $family nat
+}
 
 # ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
 for i in 0 1 2; do
@@ -861,6 +1004,8 @@ reset_counters
 $test_inet_nat && test_redirect inet
 $test_inet_nat && test_redirect6 inet
 
+test_port_shadowing
+
 if [ $ret -ne 0 ];then
        echo -n "FAIL: "
        nft --version
index 10ab56c..60aa1a4 100644 (file)
@@ -414,9 +414,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
        uffd_test_ops->allocate_area((void **)&area_src);
        uffd_test_ops->allocate_area((void **)&area_dst);
 
-       uffd_test_ops->release_pages(area_src);
-       uffd_test_ops->release_pages(area_dst);
-
        userfaultfd_open(features);
 
        count_verify = malloc(nr_pages * sizeof(unsigned long long));
@@ -437,6 +434,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
                *(area_count(area_src, nr) + 1) = 1;
        }
 
+       /*
+        * After initialization of area_src, we must explicitly release pages
+        * for area_dst to make sure it's fully empty.  Otherwise we could have
+        * some area_dst pages be errornously initialized with zero pages,
+        * hence we could hit memory corruption later in the test.
+        *
+        * One example is when THP is globally enabled, above allocate_area()
+        * calls could have the two areas merged into a single VMA (as they
+        * will have the same VMA flags so they're mergeable).  When we
+        * initialize the area_src above, it's possible that some part of
+        * area_dst could have been faulted in via one huge THP that will be
+        * shared between area_src and area_dst.  It could cause some of the
+        * area_dst pages to not be trapped by missing userfaults.
+        *
+        * This release_pages() will guarantee even if that happened, we'll
+        * proactively split the thp and drop any accidentally initialized
+        * pages within area_dst.
+        */
+       uffd_test_ops->release_pages(area_dst);
+
        pipefd = malloc(sizeof(int) * nr_cpus * 2);
        if (!pipefd)
                err("pipefd");
index cec6f5a..fa927ad 100644 (file)
@@ -332,8 +332,6 @@ static void test_no_sockets(const struct test_opts *opts)
        read_vsock_stat(&sockets);
 
        check_no_sockets(&sockets);
-
-       free_sock_stat(&sockets);
 }
 
 static void test_listen_socket_server(const struct test_opts *opts)