Merge tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Jun 2020 19:30:07 +0000 (12:30 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Jun 2020 19:30:07 +0000 (12:30 -0700)
Pull iommu fixes from Joerg Roedel:
 "A couple of Intel VT-d fixes:

   - Make Intel SVM code 64-bit only. The code uses pgd_t* and the IOMMU
     only supports long-mode page-table formats, so it's broken on 32-bit
     anyway.

   - Make sure GFX quirks for Intel VT-d are not applied to untrusted
     devices. Those devices might gain full memory access otherwise.

   - Identity mapping setup fix.

   - Fix ACS enabling when Intel IOMMU is off and untrusted devices are
     detected.

   - Two smaller fixes for coherency and IO page-table setup"
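
  [The GFX-quirk patch itself is not quoted in this log, but the guard it
  adds plausibly takes the shape below, keyed off the PCI core's
  "untrusted" flag (set for devices behind external-facing, e.g.
  Thunderbolt, ports). The helper name and message here are illustrative
  fragments, not lifted from the commit:

  static bool risky_device(struct pci_dev *pdev)
  {
  	if (pdev->untrusted) {
  		pci_info(pdev, "skipping IOMMU quirk on untrusted PCI link\n");
  		return true;
  	}
  	return false;
  }

  Each VT-d graphics quirk would then bail out early with
  "if (risky_device(pdev)) return;" before touching device registers.]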

* tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Fix misuse of iommu_domain_identity_map()
  iommu/vt-d: Update scalable mode paging structure coherency
  iommu/vt-d: Enable PCI ACS for platform opt in hint
  iommu/vt-d: Don't apply gfx quirks to untrusted devices
  iommu/vt-d: Set U/S bit in first level page table by default
  iommu/vt-d: Make Intel SVM code 64-bit only

392 files changed:
Documentation/ABI/testing/dev-kmsg
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/mm/transhuge.rst
Documentation/bpf/prog_cgroup_sockopt.rst
Documentation/core-api/pin_user_pages.rst
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
Documentation/networking/ieee802154.rst
MAINTAINERS
arch/arm64/kernel/probes/kprobes.c
arch/mips/kvm/mips.c
arch/openrisc/kernel/dma.c
arch/s390/kernel/debug.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/cpu/umwait.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmcs.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
drivers/base/regmap/regmap.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/dev.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/counters.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/iowait.h
drivers/infiniband/hw/hfi1/ipoib.h
drivers/infiniband/hw/hfi1/ipoib_tx.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/qpc.c
drivers/infiniband/hw/qedr/qedr_iw_cm.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/siw/siw_qp_rx.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/net/bareudp.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/sja1105/sja1105_vl.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/smt.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc_hw.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/neterion/vxge/vxge-config.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qede/qede_ptp.h
drivers/net/ethernet/qlogic/qede/qede_rdma.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/geneve.c
drivers/net/phy/Kconfig
drivers/net/phy/mscc/mscc_macsec.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/smsc.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/smsc95xx.c
drivers/net/vxlan.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/netlink.c
drivers/net/wireguard/noise.c
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/of/of_mdio.c
drivers/regulator/Kconfig
drivers/regulator/da9063-regulator.c
drivers/regulator/helpers.c
drivers/regulator/pfuze100-regulator.c
drivers/s390/net/qeth_core_main.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spidev.c
drivers/vdpa/vdpa.c
drivers/vhost/test.c
drivers/vhost/test.h
drivers/vhost/vdpa.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/uvesafb.c
drivers/virtio/virtio_mem.c
fs/btrfs/block-group.c
fs/btrfs/ctree.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/tree-log.c
fs/erofs/zdata.h
fs/file_table.c
fs/ocfs2/dlmglue.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/suballoc.c
include/asm-generic/cacheflush.h
include/linux/host1x.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/phy.h
include/linux/qed/qed_chain.h
include/linux/swap.h
include/linux/vmalloc.h
include/net/flow_offload.h
include/net/gue.h
include/net/sctp/constants.h
include/net/sock.h
include/net/xfrm.h
include/sound/dmaengine_pcm.h
include/sound/soc.h
include/trace/events/rxrpc.h
include/uapi/linux/bpf.h
include/uapi/linux/fb.h
include/uapi/linux/mrp_bridge.h
include/uapi/linux/rds.h
include/uapi/linux/spi/spidev.h
kernel/bpf/cgroup.c
kernel/bpf/devmap.c
kernel/kexec_file.c
kernel/module.c
kernel/nsproxy.c
kernel/printk/printk.c
kernel/trace/bpf_trace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_boot.c
kernel/trace/trace_events_trigger.c
lib/test_hmm.c
mm/compaction.c
mm/debug_vm_pgtable.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/nommu.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/swap.c
mm/swap_state.c
mm/vmalloc.c
mm/vmscan.c
mm/workingset.c
net/9p/mod.c
net/bridge/br_mrp.c
net/bridge/br_private.h
net/bridge/netfilter/nft_meta_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/core/dev.c
net/core/drop_monitor.c
net/core/flow_offload.c
net/core/sock.c
net/core/xdp.c
net/dsa/tag_edsa.c
net/ethtool/cabletest.c
net/ethtool/common.c
net/ethtool/ioctl.c
net/ethtool/linkstate.c
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_main.c
net/hsr/hsr_netlink.c
net/ipv4/Kconfig
net/ipv4/esp4_offload.c
net/ipv4/fib_semantics.c
net/ipv4/fou.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_flow_table_ipv4.c
net/ipv4/netfilter/nft_dup_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv6/Kconfig
net/ipv6/esp6_offload.c
net/ipv6/fou6.c
net/ipv6/ila/ila_main.c
net/ipv6/ip6_gre.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_flow_table_ipv6.c
net/ipv6/netfilter/nft_dup_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/mptcp/options.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_dup_netdev.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_inet.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_compat.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dup_netdev.c
net/netfilter/nft_fib_inet.c
net/netfilter/nft_fib_netdev.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_hash.c
net/netfilter/nft_limit.c
net/netfilter/nft_log.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_numgen.c
net/netfilter/nft_objref.c
net/netfilter/nft_osf.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netfilter/nft_redir.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/xt_nat.c
net/openvswitch/actions.c
net/rds/transport.c
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/input.c
net/sched/act_gate.c
net/sched/cls_api.c
net/sched/sch_cake.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_hhf.c
net/sctp/associola.c
net/sctp/bind_addr.c
net/sctp/protocol.c
net/xfrm/Kconfig
net/xfrm/xfrm_device.c
net/xfrm/xfrm_output.c
samples/bpf/xdp_monitor_user.c
samples/bpf/xdp_redirect_cpu_user.c
samples/bpf/xdp_rxq_info_user.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/max98390.c
sound/soc/codecs/rt1015.c
sound/soc/codecs/rt1015.h
sound/soc/codecs/rt5682.c
sound/soc/fsl/fsl_asrc_common.h
sound/soc/fsl/fsl_asrc_dma.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/qcom/common.c
sound/soc/qcom/qdsp6/q6afe.c
sound/soc/qcom/qdsp6/q6afe.h
sound/soc/qcom/qdsp6/q6asm.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/soc-core.c
sound/soc/soc-devres.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/sof-pci-dev.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks.c
tools/bpf/bpftool/Documentation/bpftool-map.rst
tools/bpf/bpftool/map.c
tools/include/uapi/linux/bpf.h
tools/spi/spidev_test.c
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
tools/testing/selftests/bpf/progs/bpf_cubic.c
tools/testing/selftests/bpf/progs/sockopt_sk.c
tools/testing/selftests/net/so_txtime.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/nft_conntrack_helper.sh [new file with mode: 0755]
tools/testing/selftests/pidfd/pidfd.h
tools/testing/selftests/pidfd/pidfd_getfd_test.c
tools/testing/selftests/pidfd/pidfd_setns_test.c
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/wireguard/netns.sh
tools/virtio/linux/kernel.h
tools/virtio/linux/virtio.h
tools/virtio/virtio_test.c
tools/virtio/vringh_test.c

index 1e6c28b..f307506 100644 (file)
@@ -56,11 +56,6 @@ Description: The /dev/kmsg character device node provides userspace access
                  seek after the last record available at the time
                  the last SYSLOG_ACTION_CLEAR was issued.
 
-               Due to the record nature of this interface with a "read all"
-               behavior and the specific positions each seek operation sets,
-               SEEK_CUR is not supported, returning -ESPIPE (invalid seek) to
-               errno whenever requested.
-
                The output format consists of a prefix carrying the syslog
                prefix including priority and facility, the 64 bit message
                sequence number and the monotonic timestamp in microseconds,
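
For reference, each record read from /dev/kmsg is a single line of
comma-separated prefix fields terminated by ';' and the message text. A
userspace sketch parsing one such line (the sample record is made up; the
field layout follows this ABI document):

#include <stdio.h>

int main(void)
{
	/* "<prio/facility>,<seq>,<ts_usec>,<flags>;<message>" */
	const char *rec = "6,339,5140900,-;NET: Registered protocol family 10";
	unsigned int prefix, seq;
	unsigned long long ts;
	char flag;
	int off = 0;

	if (sscanf(rec, "%u,%u,%llu,%c;%n", &prefix, &seq, &ts, &flag, &off) == 4)
		printf("pri=%u fac=%u seq=%u ts=%lluus msg=%s\n",
		       prefix & 7, prefix >> 3, seq, ts, rec + off);
	return 0;
}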
index ce3e05e..d09471a 100644 (file)
@@ -1356,8 +1356,8 @@ PAGE_SIZE multiple when read back.
 
          thp_fault_alloc
                Number of transparent hugepages which were allocated to satisfy
-               a page fault, including COW faults. This counter is not present
-               when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+               a page fault. This counter is not present when
+               CONFIG_TRANSPARENT_HUGEPAGE is not set.
 
          thp_collapse_alloc
                Number of transparent hugepages which were allocated to allow
index 6a233e4..b2acd0d 100644 (file)
@@ -305,8 +305,7 @@ monitor how successfully the system is providing huge pages for use.
 
 thp_fault_alloc
        is incremented every time a huge page is successfully
-       allocated to handle a page fault. This applies to both the
-       first time a page is faulted and for COW faults.
+       allocated to handle a page fault.
 
 thp_collapse_alloc
        is incremented by khugepaged when it has found
index c47d974..172f957 100644 (file)
@@ -86,6 +86,20 @@ then the next program in the chain (A) will see those changes,
 *not* the original input ``setsockopt`` arguments. The potentially
 modified values will be then passed down to the kernel.
 
+Large optval
+============
+When ``optval`` is larger than ``PAGE_SIZE``, the BPF program
+can access only the first ``PAGE_SIZE`` bytes of that data. So it has two options:
+
+* Set ``optlen`` to zero, which indicates that the kernel should
+  use the original buffer from userspace. Any modifications
+  done by the BPF program to ``optval`` are ignored.
+* Set ``optlen`` to a value less than ``PAGE_SIZE``, which
+  indicates that the kernel should use BPF's trimmed ``optval``.
+
+When the BPF program returns with an ``optlen`` greater than
+``PAGE_SIZE``, userspace will receive an ``EFAULT`` errno.
+
 Example
 =======
 
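
To make the first option above concrete, a minimal cgroup/setsockopt
program honouring the new rule might look like this. This is a sketch,
not part of the merge: PAGE_SZ is assumed to be 4K here, and optlen is a
field of the standard struct bpf_sockopt context.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define PAGE_SZ 4096	/* assumption: 4K pages, for this sketch only */

SEC("cgroup/setsockopt")
int _setsockopt(struct bpf_sockopt *ctx)
{
	/* Payload larger than what the program can see: hand the
	 * original, unmodified userspace buffer back to the kernel
	 * by zeroing optlen, per the rule documented above. */
	if (ctx->optlen > PAGE_SZ)
		ctx->optlen = 0;
	return 1;	/* allow the syscall to proceed */
}

char _license[] SEC("license") = "GPL";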
index 6068266..7ca8c7b 100644 (file)
@@ -33,7 +33,7 @@ all combinations of get*(), pin*(), FOLL_LONGTERM, and more. Also, the
 pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so
 that's a natural dividing line, and a good point to make separate wrapper calls.
 In other words, use pin_user_pages*() for DMA-pinned pages, and
-get_user_pages*() for other cases. There are four cases described later on in
+get_user_pages*() for other cases. There are five cases described later on in
 this document, to further clarify that concept.
 
 FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However,
index 9147df2..38efb50 100644 (file)
@@ -34,12 +34,15 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: controller register bus clock
+      - description: baud rate generator and delay control clock
 
   clock-names:
-    description: input clock for the baud rate generator
-    items:
-      - const: core
+    minItems: 1
+    maxItems: 2
 
 if:
   properties:
@@ -51,17 +54,22 @@ if:
 then:
   properties:
     clocks:
-      contains:
-        items:
-          - description: controller register bus clock
-          - description: baud rate generator and delay control clock
+      minItems: 2
 
     clock-names:
-      minItems: 2
       items:
         - const: core
         - const: pclk
 
+else:
+  properties:
+    clocks:
+      maxItems: 1
+
+    clock-names:
+      items:
+        - const: core
+
 required:
   - compatible
   - reg
index 36ca823..6f4bf84 100644 (file)
@@ -30,8 +30,8 @@ Socket API
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in the userspace package (see either http://wpan.cakelab.org/ or the
-git tree at https://github.com/linux-wpan/wpan-tools).
+in the userspace package (see either https://linux-wpan.org/wpan-tools.html
+or the git tree at https://github.com/linux-wpan/wpan-tools).
 
 6LoWPAN Linux implementation
 ============================
index 7b5ffd6..28bd471 100644 (file)
@@ -8333,7 +8333,7 @@ M:        Alexander Aring <alex.aring@gmail.com>
 M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
-W:     http://wpan.cakelab.org/
+W:     https://linux-wpan.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
 F:     Documentation/networking/ieee802154.rst
@@ -10808,7 +10808,7 @@ F:      Documentation/devicetree/bindings/dma/mtk-*
 F:     drivers/dma/mediatek/
 
 MEDIATEK ETHERNET DRIVER
-M:     Felix Fietkau <nbd@openwrt.org>
+M:     Felix Fietkau <nbd@nbd.name>
 M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Mark Lee <Mark-MC.Lee@mediatek.com>
@@ -16058,8 +16058,10 @@ SPARSE CHECKER
 M:     "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
 L:     linux-sparse@vger.kernel.org
 S:     Maintained
-W:     https://sparse.wiki.kernel.org/
+W:     https://sparse.docs.kernel.org/
 T:     git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+Q:     https://patchwork.kernel.org/project/linux-sparse/list/
+B:     https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools
 F:     include/linux/compiler.h
 
 SPEAR CLOCK FRAMEWORK SUPPORT
index d1c95dc..cbe49cd 100644 (file)
@@ -120,15 +120,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 void *alloc_insn_page(void)
 {
-       void *page;
-
-       page = vmalloc_exec(PAGE_SIZE);
-       if (page) {
-               set_memory_ro((unsigned long)page, 1);
-               set_vm_flush_reset_perms(page);
-       }
-
-       return page;
+       return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __func__);
 }
 
 /* arm kprobe: install breakpoint in text */
index 521bd58..666d335 100644 (file)
@@ -67,8 +67,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT("vz_ghfc", vz_ghfc_exits),
        VCPU_STAT("vz_gpa", vz_gpa_exits),
        VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
        VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
 #endif
+#endif
        VCPU_STAT("halt_successful_poll", halt_successful_poll),
        VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
        VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
index c152a68..3457276 100644 (file)
@@ -74,8 +74,11 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size)
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
+       mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
                        NULL);
+       mmap_read_unlock(&init_mm);
+
        if (error)
                return ERR_PTR(error);
        return cpu_addr;
@@ -85,9 +88,11 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
        unsigned long va = (unsigned long)cpu_addr;
 
+       mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                        &clear_nocache_walk_ops, NULL));
+       mmap_read_unlock(&init_mm);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
index 6364460..263075a 100644 (file)
@@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
        if (!areas)
                goto fail_malloc_areas;
        for (i = 0; i < nr_areas; i++) {
+               /* __GFP_NOWARN to avoid a user-triggerable WARN; we handle failures */
                areas[i] = kmalloc_array(pages_per_area,
                                         sizeof(debug_entry_t *),
-                                        GFP_KERNEL);
+                                        GFP_KERNEL | __GFP_NOWARN);
                if (!areas[i])
                        goto fail_malloc_areas2;
                for (j = 0; j < pages_per_area; j++) {
index cd241ee..0782772 100644 (file)
@@ -170,6 +170,8 @@ static noinline __init void setup_lowcore_early(void)
        psw_t psw;
 
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+       if (IS_ENABLED(CONFIG_KASAN))
+               psw.mask |= PSW_MASK_DAT;
        psw.addr = (unsigned long) s390_base_ext_handler;
        S390_lowcore.external_new_psw = psw;
        psw.addr = (unsigned long) s390_base_pgm_handler;
index 496f74d..969b35b 100644 (file)
@@ -378,9 +378,9 @@ ENTRY(system_call)
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lg      %r12,__LC_CURRENT
-       lghi    %r13,__TASK_thread
        lghi    %r14,_PIF_SYSCALL
 .Lsysc_per:
+       lghi    %r13,__TASK_thread
        lg      %r15,__LC_KERNEL_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
        UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
index a54c6a4..2bdc72e 100644 (file)
@@ -375,7 +375,9 @@ void __init hyperv_init(void)
        guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
-       hv_hypercall_pg = vmalloc_exec(PAGE_SIZE);
+       hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
+                       VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__);
        if (hv_hypercall_pg == NULL) {
                wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
                goto remove_cpuhp_state;
index f8998e9..be5363b 100644 (file)
@@ -943,7 +943,7 @@ struct kvm_arch {
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;
-       bool apic_map_dirty;
+       atomic_t apic_map_dirty;
 
        bool apic_access_page_done;
        unsigned long apicv_inhibit_reasons;
@@ -1220,7 +1220,7 @@ struct kvm_x86_ops {
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
-       int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+       int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
index 73d997a..e039a93 100644 (file)
@@ -25,8 +25,6 @@
 #define TPAUSE_C01_STATE               1
 #define TPAUSE_C02_STATE               0
 
-u32 get_umwait_control_msr(void);
-
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
 {
index 2da1f95..816b31c 100644 (file)
@@ -194,6 +194,7 @@ enum page_cache_mode {
 #define _PAGE_TABLE_NOENC       (__PP|__RW|_USR|___A|   0|___D|   0|   0)
 #define _PAGE_TABLE             (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
 #define __PAGE_KERNEL_RO        (__PP|   0|   0|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_ROX       (__PP|   0|   0|___A|   0|___D|   0|___G)
 #define __PAGE_KERNEL_NOCACHE   (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
 #define __PAGE_KERNEL_VVAR      (__PP|   0|_USR|___A|__NX|___D|   0|___G)
 #define __PAGE_KERNEL_LARGE     (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
@@ -219,6 +220,7 @@ enum page_cache_mode {
 #define PAGE_KERNEL_RO         __pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
 #define PAGE_KERNEL_EXEC       __pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
 #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
+#define PAGE_KERNEL_ROX                __pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
 #define PAGE_KERNEL_NOCACHE    __pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
 #define PAGE_KERNEL_LARGE      __pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
index 300e3fd..ec8064c 100644 (file)
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
-u32 get_umwait_control_msr(void)
-{
-       return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
index 34a7e05..5bf72fc 100644 (file)
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
        kvfree(map);
 }
 
+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock_held.
+ */
+enum {
+       CLEAN,
+       UPDATE_IN_PROGRESS,
+       DIRTY
+};
+
 void kvm_recalculate_apic_map(struct kvm *kvm)
 {
        struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */
 
-       if (!kvm->arch.apic_map_dirty) {
-               /*
-                * Read kvm->arch.apic_map_dirty before
-                * kvm->arch.apic_map
-                */
-               smp_rmb();
+       /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
+       if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;
-       }
 
        mutex_lock(&kvm->arch.apic_map_lock);
-       if (!kvm->arch.apic_map_dirty) {
+       /*
+        * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+        * (if clean) or the APIC registers (if dirty).
+        */
+       if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+                                  DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
@@ -256,11 +268,11 @@ out:
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
-        * Write kvm->arch.apic_map before
-        * clearing apic->apic_map_dirty
+        * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+        * If another update has come in, leave it DIRTY.
         */
-       smp_wmb();
-       kvm->arch.apic_map_dirty = false;
+       atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+                              UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);
 
        if (old)
@@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
                else
                        static_key_slow_inc(&apic_sw_disabled.key);
 
-               apic->vcpu->kvm->arch.apic_map_dirty = true;
+               atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
        kvm_lapic_set_reg(apic, APIC_LDR, id);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 
        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-                       apic->vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                } else
                        ret = 1;
                break;
@@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                        static_key_slow_dec_deferred(&apic_hw_disabled);
                } else {
                        static_key_slow_inc(&apic_hw_disabled.key);
-                       vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                }
        }
 
@@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        if (!apic)
                return;
 
-       vcpu->kvm->arch.apic_map_dirty = false;
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
 
@@ -2567,6 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        }
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        kvm_recalculate_apic_map(vcpu->kvm);
        kvm_apic_set_version(vcpu);
 
index 0ad06bf..444bb9c 100644 (file)
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
index fdd05c2..76817d1 100644 (file)
@@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  * Emulate arch specific page modification logging for the
  * nested hypervisor
  */
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
 {
        if (kvm_x86_ops.write_log_dirty)
-               return kvm_x86_ops.write_log_dirty(vcpu);
+               return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
 
        return 0;
 }
index a6d484e..bd70ece 100644 (file)
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
-                                            int write_fault)
+                                            gpa_t addr, int write_fault)
 {
        unsigned level, index;
        pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 #if PTTYPE == PTTYPE_EPT
-                       if (kvm_arch_write_log_dirty(vcpu))
+                       if (kvm_arch_write_log_dirty(vcpu, addr))
                                return -EINVAL;
 #endif
                        pte |= PT_GUEST_DIRTY_MASK;
@@ -360,7 +360,6 @@ retry_walk:
        ++walker->level;
 
        do {
-               gfn_t real_gfn;
                unsigned long host_addr;
 
                pt_access = pte_access;
@@ -375,7 +374,7 @@ retry_walk:
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
 
-               real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
+               real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              nested_access,
                                              &walker->fault);
 
@@ -389,12 +388,10 @@ retry_walk:
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
-               if (unlikely(real_gfn == UNMAPPED_GVA))
+               if (unlikely(real_gpa == UNMAPPED_GVA))
                        return 0;
 
-               real_gfn = gpa_to_gfn(real_gfn);
-
-               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
+               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
@@ -457,7 +454,8 @@ retry_walk:
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
 
        if (unlikely(!accessed_dirty)) {
-               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+                                                       addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
index 8ccfa41..c0da4dd 100644 (file)
@@ -3344,7 +3344,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        fastpath_t exit_fastpath;
        struct vcpu_svm *svm = to_svm(vcpu);
index 5c0ff80..7a3675f 100644 (file)
@@ -72,11 +72,24 @@ struct loaded_vmcs {
        struct vmcs_controls_shadow controls_shadow;
 };
 
+static inline bool is_intr_type(u32 intr_info, u32 type)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
+}
+
+static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
+                        INTR_INFO_VECTOR_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
+}
+
 static inline bool is_exception_n(u32 intr_info, u8 vector)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+       return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
 }
 
 static inline bool is_debug(u32 intr_info)
@@ -106,28 +119,23 @@ static inline bool is_gp_fault(u32 intr_info)
 
 static inline bool is_machine_check(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+       return is_exception_n(intr_info, MC_VECTOR);
 }
 
 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
 }
 
 static inline bool is_nmi(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
 }
 
 static inline bool is_external_intr(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
-               == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR);
+       return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
 }
 
 enum vmcs_field_width {
index 36c7717..cb22f33 100644 (file)
@@ -6606,23 +6606,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
-       u32 host_umwait_control;
-
-       if (!vmx_has_waitpkg(vmx))
-               return;
-
-       host_umwait_control = get_umwait_control_msr();
-
-       if (vmx->msr_ia32_umwait_control != host_umwait_control)
-               add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
-                       vmx->msr_ia32_umwait_control,
-                       host_umwait_control, false);
-       else
-               clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6728,9 +6711,7 @@ reenter_guest:
 
        pt_guest_enter(vmx);
 
-       if (vcpu_to_pmu(vcpu)->version)
-               atomic_switch_perf_msrs(vmx);
-       atomic_switch_umwait_control_msr(vmx);
+       atomic_switch_perf_msrs(vmx);
 
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
@@ -7501,11 +7482,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
        kvm_flush_pml_buffers(kvm);
 }
 
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa, dst;
+       gpa_t dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7524,7 +7505,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                        return 1;
                }
 
-               gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               gpa &= ~0xFFFull;
                dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
                if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
index 8a83b5e..639798e 100644 (file)
@@ -288,8 +288,6 @@ struct vcpu_vmx {
 
        u64 current_tsc_ratio;
 
-       u32 host_pkru;
-
        unsigned long host_debugctlmsr;
 
        /*
index 00c88c2..3b92db4 100644 (file)
@@ -2856,7 +2856,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_mtrr_set_msr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                return kvm_set_apic_base(vcpu, msr_info);
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_TSCDEADLINE:
                kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3196,7 +3196,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_APICBASE:
                msr_info->data = kvm_get_apic_base(vcpu);
                break;
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
        case MSR_IA32_TSCDEADLINE:
                msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
@@ -4603,7 +4603,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
                user_tsc_khz = (u32)arg;
 
-               if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+               if (kvm_has_tsc_control &&
+                   user_tsc_khz >= kvm_max_guest_tsc_khz)
                        goto out;
 
                if (user_tsc_khz == 0)
index c472f62..06a7968 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be16 *b = buf;
-
-       b[0] = cpu_to_be16(val << shift);
+       put_unaligned_be16(val << shift, buf);
 }
 
 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le16 *b = buf;
-
-       b[0] = cpu_to_le16(val << shift);
+       put_unaligned_le16(val << shift, buf);
 }
 
 static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u16 *)buf = val << shift;
+       u16 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be32 *b = buf;
-
-       b[0] = cpu_to_be32(val << shift);
+       put_unaligned_be32(val << shift, buf);
 }
 
 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le32 *b = buf;
-
-       b[0] = cpu_to_le32(val << shift);
+       put_unaligned_le32(val << shift, buf);
 }
 
 static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u32 *)buf = val << shift;
+       u32 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 #ifdef CONFIG_64BIT
 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be64 *b = buf;
-
-       b[0] = cpu_to_be64((u64)val << shift);
+       put_unaligned_be64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le64 *b = buf;
-
-       b[0] = cpu_to_le64((u64)val << shift);
+       put_unaligned_le64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u64 *)buf = (u64)val << shift;
+       u64 v = (u64) val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 #endif
 
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
 
 static unsigned int regmap_parse_16_be(const void *buf)
 {
-       const __be16 *b = buf;
-
-       return be16_to_cpu(b[0]);
+       return get_unaligned_be16(buf);
 }
 
 static unsigned int regmap_parse_16_le(const void *buf)
 {
-       const __le16 *b = buf;
-
-       return le16_to_cpu(b[0]);
+       return get_unaligned_le16(buf);
 }
 
 static void regmap_parse_16_be_inplace(void *buf)
 {
-       __be16 *b = buf;
+       u16 v = get_unaligned_be16(buf);
 
-       b[0] = be16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_16_le_inplace(void *buf)
 {
-       __le16 *b = buf;
+       u16 v = get_unaligned_le16(buf);
 
-       b[0] = le16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_16_native(const void *buf)
 {
-       return *(u16 *)buf;
+       u16 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
 
 static unsigned int regmap_parse_32_be(const void *buf)
 {
-       const __be32 *b = buf;
-
-       return be32_to_cpu(b[0]);
+       return get_unaligned_be32(buf);
 }
 
 static unsigned int regmap_parse_32_le(const void *buf)
 {
-       const __le32 *b = buf;
-
-       return le32_to_cpu(b[0]);
+       return get_unaligned_le32(buf);
 }
 
 static void regmap_parse_32_be_inplace(void *buf)
 {
-       __be32 *b = buf;
+       u32 v = get_unaligned_be32(buf);
 
-       b[0] = be32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_32_le_inplace(void *buf)
 {
-       __le32 *b = buf;
+       u32 v = get_unaligned_le32(buf);
 
-       b[0] = le32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_32_native(const void *buf)
 {
-       return *(u32 *)buf;
+       u32 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 #ifdef CONFIG_64BIT
 static unsigned int regmap_parse_64_be(const void *buf)
 {
-       const __be64 *b = buf;
-
-       return be64_to_cpu(b[0]);
+       return get_unaligned_be64(buf);
 }
 
 static unsigned int regmap_parse_64_le(const void *buf)
 {
-       const __le64 *b = buf;
-
-       return le64_to_cpu(b[0]);
+       return get_unaligned_le64(buf);
 }
 
 static void regmap_parse_64_be_inplace(void *buf)
 {
-       __be64 *b = buf;
+       u64 v = get_unaligned_be64(buf);
 
-       b[0] = be64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_64_le_inplace(void *buf)
 {
-       __le64 *b = buf;
+       u64 v = get_unaligned_le64(buf);
 
-       b[0] = le64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_64_native(const void *buf)
 {
-       return *(u64 *)buf;
+       u64 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 #endif
 
@@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map)
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
        kfree_const(map->name);
+       kfree(map->patch);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
  * @reg: Register to read from
  * @bits: Bits to test
  *
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
  */
 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
 {
index b544baf..5d71c23 100644 (file)
@@ -1298,8 +1298,12 @@ static int sdma_v5_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (adev->sdma.instance[i].fw != NULL)
+                       release_firmware(adev->sdma.instance[i].fw);
+
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+       }
 
        return 0;
 }
index d27221d..0e0c42e 100644 (file)
@@ -428,6 +428,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
                                           (int)process->lead_thread->pid);
                if (ret) {
                        pr_warn("Creating procfs pid directory failed");
+                       kobject_put(process->kobj);
                        goto out;
                }
 
index 7ced9f8..10ac807 100644 (file)
@@ -5024,7 +5024,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_connector *connector = &aconnector->base;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct dc_stream_state *stream;
-       int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+       const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+       int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
        enum dc_status dc_result = DC_OK;
 
        do {
index 076af26..1d692f4 100644 (file)
@@ -1058,7 +1058,6 @@ static const struct {
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
                {"test_pattern", &dp_phy_test_pattern_fops},
-               {"output_bpc", &output_bpc_fops},
                {"vrr_range", &vrr_range_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                {"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
        debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
                                   &force_yuv420_output_fops);
 
+       debugfs_create_file("output_bpc", 0644, dir, connector,
+                           &output_bpc_fops);
+
        connector->debugfs_dpcd_address = 0;
        connector->debugfs_dpcd_size = 0;
 
index dcf84a6..949d10e 100644 (file)
@@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
 
        srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
 
-       if (!srm)
-               return -EINVAL;
+       if (!srm) {
+               ret = -EINVAL;
+               goto ret;
+       }
 
        if (pos >= srm_size)
                ret = 0;
index 170aa76..5609e16 100644 (file)
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_debug_leave);
 
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+                                           bool force)
 {
        bool do_delayed;
        int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
                return 0;
 
        mutex_lock(&fb_helper->lock);
-       ret = drm_client_modeset_commit(&fb_helper->client);
+       if (force) {
+               /*
+                * Yes this is the _locked version which expects the master lock
+                * to be held. But for forced restores we're intentionally
+                * racing here, see drm_fb_helper_set_par().
+                */
+               ret = drm_client_modeset_commit_locked(&fb_helper->client);
+       } else {
+               ret = drm_client_modeset_commit(&fb_helper->client);
+       }
 
        do_delayed = fb_helper->delayed_hotplug;
        if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 
        return ret;
 }
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+       return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct fb_var_screeninfo *var = &info->var;
+       bool force;
 
        if (oops_in_progress)
                return -EBUSY;
@@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
                return -EINVAL;
        }
 
-       drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       /*
+        * Normally we want to make sure that a kms master takes precedence over
+        * fbdev, to avoid fbdev flickering and occasionally stealing the
+        * display status. But Xorg first sets the vt back to text mode using
+        * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+        * status when exiting.
+        *
+        * In the past this was caught by drm_fb_helper_lastclose(), but on
+        * modern systems where logind always keeps a drm fd open to orchestrate
+        * the vt switching, this doesn't work.
+        *
+        * To not break the userspace ABI we have this special case here, which
+        * is only used for the above case. Everything else uses the normal
+        * commit function, which ensures that we never steal the display from
+        * an active drm master.
+        */
+       force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+       __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
 
        return 0;
 }
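
The forced path above is keyed off FB_ACTIVATE_KD_TEXT in var->activate,
which the console layer sets when switching a VT back to text mode. A
hypothetical userspace trigger, purely to show where the flag enters
(normally fbcon, not applications, sets it):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int restore_text_mode(int fbfd)
{
	struct fb_var_screeninfo var;

	if (ioctl(fbfd, FBIOGET_VSCREENINFO, &var))
		return -1;
	var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_KD_TEXT;
	/* lands in drm_fb_helper_set_par() via fb_set_par() */
	return ioctl(fbfd, FBIOPUT_VSCREENINFO, &var);
}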
index ffd95bf..d00ea38 100644 (file)
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
        int orientation;
 };
 
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
-       .width = 800,
-       .height = 1280,
-       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
 static const struct drm_dmi_panel_orientation_data asus_t100ha = {
        .width = 800,
        .height = 1280,
@@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
                },
-               .driver_data = (void *)&acer_s1003,
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Asus T100HA */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
                },
                .driver_data = (void *)&asus_t100ha,
+       }, {    /* Asus T101HA */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* GPD MicroPC (generic strings, also match on bios date) */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
index 04e1d38..08802e5 100644 (file)
@@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_plane *plane = &pipe->plane;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        const struct drm_display_mode *mode = &cstate->mode;
        struct drm_framebuffer *fb = plane->state->fb;
        u32 format = fb->format->format;
index 84f3e2d..80082d6 100644 (file)
@@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm)
 
        drm_mode_config_reset(drm);
        drm_kms_helper_poll_init(drm);
-       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 }
@@ -264,6 +263,8 @@ static int mcde_drm_bind(struct device *dev)
        if (ret < 0)
                goto unbind;
 
+       drm_fbdev_generic_setup(drm, 32);
+
        return 0;
 
 unbind:
index b6ecd15..5178f87 100644 (file)
@@ -2495,6 +2495,7 @@ static const struct panel_desc logicpd_type_28 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2663,6 +2664,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct display_timing nlt_nl192108ac18_02d_timing = {
index b57c37d..c7fbb79 100644 (file)
@@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
                if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
                        ret = -EINVAL;
 
-               if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+               if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
                        ret = -EINVAL;
 
                if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
index 0919f1f..f65d148 100644 (file)
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
 config DRM_RCAR_LVDS
        tristate "R-Car DU LVDS Encoder Support"
        depends on DRM && DRM_BRIDGE && OF
+       select DRM_KMS_HELPER
        select DRM_PANEL
        select OF_FLATTREE
        select OF_OVERLAY
index 56cc037..cc4fb91 100644 (file)
@@ -363,6 +363,19 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
        mixer->engine.ops = &sun8i_engine_ops;
        mixer->engine.node = dev->of_node;
 
+       if (of_find_property(dev->of_node, "iommus", NULL)) {
+               /*
+                * This assumes we have the same DMA constraints for
+                * all the mixers in our pipeline. This sounds
+                * bad, but it has always been the case for us, and
+                * DRM doesn't do per-device allocation either, so we
+                * would need to fix DRM first...
+                */
+               ret = of_dma_configure(drm->dev, dev->of_node, true);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * While this function can fail, we shouldn't do anything
         * if this happens. Some early DE2 DT entries don't provide
index 83f31c6..04d6848 100644 (file)
@@ -957,6 +957,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
        }
 
        drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+       drm_plane_create_zpos_immutable_property(&plane->base, 255);
 
        return &plane->base;
 }
index 8183e61..22a03f7 100644 (file)
@@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_enable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_enable(wgrp);
        }
 
        return 0;
@@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_disable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_disable(wgrp);
        }
 }
 
@@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
 
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
+
+       return err;
+
+unregister:
+       host1x_client_unregister(&hub->client);
+       pm_runtime_disable(&pdev->dev);
        return err;
 }
 
index f73b81c..0f20e14 100644 (file)
@@ -883,8 +883,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        if (!fence)
                return 0;
 
-       if (no_wait_gpu)
+       if (no_wait_gpu) {
+               dma_fence_put(fence);
                return -EBUSY;
+       }
 
        dma_resv_add_shared_fence(bo->base.resv, fence);
 
index a43aa72..fa03fab 100644 (file)
@@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
+                       dma_fence_put(moving);
                        return VM_FAULT_NOPAGE;
                default:
+                       dma_fence_put(moving);
                        return VM_FAULT_SIGBUS;
                }
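
Both TTM hunks above plug the same class of bug: a referenced fence escapes through an early return. The discipline is that every exit path taken after acquiring a reference must pair it with a put. A minimal sketch with a hypothetical refcounted fence (not the dma_fence API):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct fence { int refcount; };

static struct fence *fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

static int add_move_fence(struct fence *src, bool no_wait_gpu)
{
	struct fence *fence = fence_get(src);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		fence_put(fence);	/* the fix: drop the reference on -EBUSY too */
		return -EBUSY;
	}

	/* ... hand the fence over to a longer-lived owner here ... */
	fence_put(fence);
	return 0;
}
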
 
index 6a995db..e201f62 100644 (file)
@@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
  */
 void host1x_driver_unregister(struct host1x_driver *driver)
 {
+       struct host1x *host1x;
+
        driver_unregister(&driver->driver);
 
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list)
+               host1x_detach_driver(host1x, driver);
+
+       mutex_unlock(&devices_lock);
+
        mutex_lock(&drivers_lock);
        list_del_init(&driver->list);
        mutex_unlock(&drivers_lock);
index d24344e..d0ebb70 100644 (file)
@@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev)
 
        err = host1x_register(host);
        if (err < 0)
-               goto deinit_intr;
+               goto deinit_debugfs;
+
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
 
        return 0;
 
-deinit_intr:
+unregister:
+       host1x_unregister(host);
+deinit_debugfs:
+       host1x_debug_deinit(host);
        host1x_intr_deinit(host);
 deinit_syncpt:
        host1x_syncpt_deinit(host);
index 9ce787e..0d13772 100644 (file)
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
 
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
                                 struct cm_work *work)
+       __releases(&cm_id_priv->lock)
 {
        bool immediate;
 
index 3d7cc9f..c30cf53 100644 (file)
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
 {
        struct rdma_id_private *id_priv, *id_priv_dev;
 
+       lockdep_assert_held(&lock);
+
        if (!bind_list)
                return ERR_PTR(-EINVAL);
 
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
                }
        }
 
+       mutex_lock(&lock);
        /*
         * Net namespace might be getting deleted while route lookup,
         * cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
        rcu_read_unlock();
+       mutex_unlock(&lock);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        struct net *net = id_priv->id.route.addr.dev_addr.net;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
 
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
        u64 sid, mask;
        __be16 port;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        port = htons(bind_list->port);
 
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
        struct rdma_bind_list *bind_list;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
        struct sockaddr  *saddr = cma_src_addr(id_priv);
        __be16 dport = cma_port(daddr);
 
+       lockdep_assert_held(&lock);
+
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
                struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
        unsigned int rover;
        struct net *net = id_priv->id.route.addr.dev_addr.net;
 
+       lockdep_assert_held(&lock);
+
        inet_get_local_port_range(net, &low, &high);
        remaining = (high - low) + 1;
        rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
        struct rdma_id_private *cur_id;
        struct sockaddr *addr, *cur_addr;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
        unsigned short snum;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        snum = ntohs(cma_port(cma_src_addr(id_priv)));
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
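
The lockdep_assert_held() annotations added throughout cma.c above document each helper's locking contract in a machine-checkable way: on lockdep-enabled kernels, a caller that forgets the mutex triggers a splat, while on production builds the check compiles away. The idiom in isolation:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(lock);

/* Callers must hold 'lock'; lockdep verifies this on debug kernels. */
static void touch_protected_state(void)
{
	lockdep_assert_held(&lock);
	/* ... access data guarded by 'lock' ... */
}

static void caller(void)
{
	mutex_lock(&lock);
	touch_protected_state();
	mutex_unlock(&lock);
}
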
index 2257d7f..738d1fa 100644 (file)
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
        return ret;
 }
 
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
        struct ib_device *dev = counter->device;
        struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
        if (!port_counter->hstats)
                return;
 
+       rdma_counter_query_stats(counter);
+
        for (i = 0; i < counter->stats->num_counters; i++)
                port_counter->hstats->value[i] += counter->stats->value[i];
 }
index 186e0d6..a09f8e3 100644 (file)
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
        xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
        flush_workqueue(port_priv->wq);
-       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);
+       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
+                       kfree(mad_priv);
                        ret = -ENOMEM;
                        break;
                }
index 38de494..3027cd2 100644 (file)
@@ -470,40 +470,46 @@ static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                       struct uverbs_attr_bundle *attrs)
 {
-       const struct uverbs_obj_fd_type *fd_type =
-               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+       const struct uverbs_obj_fd_type *fd_type;
        int new_fd;
-       struct ib_uobject *uobj;
+       struct ib_uobject *uobj, *ret;
        struct file *filp;
 
+       uobj = alloc_uobj(attrs, obj);
+       if (IS_ERR(uobj))
+               return uobj;
+
+       fd_type =
+               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-                   fd_type->fops->release != &uverbs_async_event_release))
-               return ERR_PTR(-EINVAL);
+                   fd_type->fops->release != &uverbs_async_event_release)) {
+               ret = ERR_PTR(-EINVAL);
+               goto err_fd;
+       }
 
        new_fd = get_unused_fd_flags(O_CLOEXEC);
-       if (new_fd < 0)
-               return ERR_PTR(new_fd);
-
-       uobj = alloc_uobj(attrs, obj);
-       if (IS_ERR(uobj))
+       if (new_fd < 0) {
+               ret = ERR_PTR(new_fd);
                goto err_fd;
+       }
 
        /* Note that uverbs_uobject_fd_release() is called during abort */
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
-               uverbs_uobject_put(uobj);
-               uobj = ERR_CAST(filp);
-               goto err_fd;
+               ret = ERR_CAST(filp);
+               goto err_getfile;
        }
        uobj->object = filp;
 
        uobj->id = new_fd;
        return uobj;
 
-err_fd:
+err_getfile:
        put_unused_fd(new_fd);
-       return uobj;
+err_fd:
+       uverbs_uobject_put(uobj);
+       return ret;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
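
The rework above restores the kernel's standard goto-unwind shape: take each resource in turn, and on failure release everything acquired so far in reverse order, one label per acquisition. A generic sketch of the idiom (hypothetical acquire/release helpers, not the uverbs API):

#include <errno.h>
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire_a(void) { return malloc(sizeof(struct res)); }
static int acquire_b(void) { return 3; /* e.g. an fd */ }
static struct res *acquire_c(void) { return malloc(sizeof(struct res)); }
static void release_a(struct res *a) { free(a); }
static void release_b(int b) { (void)b; /* put_unused_fd() analogue */ }

static int begin(struct res **out)
{
	struct res *a, *c;
	int b, err;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (b < 0) {
		err = b;
		goto err_a;
	}

	c = acquire_c();
	if (!c) {
		err = -ENOMEM;
		goto err_b;
	}

	*out = c;
	return 0;

err_b:
	release_b(b);	/* unwind in reverse acquisition order */
err_a:
	release_a(a);
	return err;
}
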
index 08313f7..7dd0824 100644 (file)
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
        props->max_send_sge = dev_attr->max_sq_sge;
        props->max_recv_sge = dev_attr->max_rq_sge;
        props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+       props->max_pkeys = 1;
 
        if (udata && udata->outlen) {
                resp.max_sq_sge = dev_attr->max_sq_sge;
index 4633a0c..2ced236 100644 (file)
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
 static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
 static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
-
-       if (!try_module_get(THIS_MODULE))
-               return -ENODEV;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
index 07847cb..d580aa1 100644 (file)
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
  * @wait_head: the wait queue
  *
  * This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (e.g., an sdma descriptor or pio
  * buffer) runs out.
  */
 static inline void iowait_queue(bool pkts_sent, struct iowait *w,
index 185c9b0..b8c9d0a 100644 (file)
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
  * @sde: sdma engine
  * @tx_list: tx request list
  * @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of queue stops
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
  * @flow: tracks when list needs to be flushed for a flow change
  * @q_idx: ipoib Tx queue index
  * @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
        struct sdma_engine *sde;
        struct list_head tx_list;
        u64 sent_txreqs;
+       atomic_t stops;
+       atomic_t ring_full;
+       atomic_t no_desc;
        union hfi1_ipoib_flow flow;
        u8 q_idx;
        bool pkts_sent;
index 883cb9d..9df292b 100644 (file)
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
        return sent - completed;
 }
 
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 {
-       if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
-                                      atomic64_read(&txq->complete_txreqs)) >=
-           min_t(unsigned int, txq->priv->netdev->tx_queue_len,
-                 txq->tx_ring.max_items - 1)))
+       return hfi1_ipoib_txreqs(txq->sent_txreqs,
+                                atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_inc_return(&txq->stops) == 1)
                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }
 
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_dec_and_test(&txq->stops))
+               netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+       ++txq->sent_txreqs;
+       if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+           !atomic_xchg(&txq->ring_full, 1))
+               hfi1_ipoib_stop_txq(txq);
+}
+
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
        struct net_device *dev = txq->priv->netdev;
 
-       /* If the queue is already running just return */
-       if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
-               return;
-
        /* If shutting down just return as queue state is irrelevant */
        if (unlikely(dev->reg_state != NETREG_REGISTERED))
                return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
         * Use the minimum of the current tx_queue_len and the ring's max txreqs
         * to protect against ring overflow.
         */
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs,
-                             atomic64_read(&txq->complete_txreqs))
-           < min_t(unsigned int, dev->tx_queue_len,
-                   txq->tx_ring.max_items) >> 1)
-               netif_wake_subqueue(dev, txq->q_idx);
+       if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+           atomic_xchg(&txq->ring_full, 0))
+               hfi1_ipoib_wake_txq(txq);
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
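
The rework above is a two-watermark flow-control scheme: stop the subqueue once outstanding txreqs reach a high watermark, wake it only after draining below a low watermark, and use an atomic exchange so each stop pairs with exactly one wake even when the send and completion paths race. A minimal C11 sketch with hypothetical stop/wake hooks standing in for the netif calls:

#include <stdatomic.h>
#include <stdint.h>

struct txq {
	uint64_t sent;			/* txreqs posted */
	_Atomic uint64_t completed;	/* txreqs completed */
	atomic_int full;		/* 0 = running, 1 = stopped for ring-full */
	unsigned int hwat, lwat;	/* high/low watermarks */
};

static void stop_queue(struct txq *q) { (void)q; /* netif_stop_subqueue() analogue */ }
static void wake_queue(struct txq *q) { (void)q; /* netif_wake_subqueue() analogue */ }

static uint64_t used(struct txq *q)
{
	return q->sent - atomic_load(&q->completed);
}

static void check_depth_on_send(struct txq *q)
{
	q->sent++;
	/* the exchange guarantees only the first crosser issues the stop */
	if (used(q) >= q->hwat && !atomic_exchange(&q->full, 1))
		stop_queue(q);
}

static void check_stopped_on_complete(struct txq *q)
{
	/* ...and only one completer clears the flag and restarts the queue */
	if (used(q) < q->lwat && atomic_exchange(&q->full, 0))
		wake_queue(q);
}
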
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
        if (unlikely(!tx))
                return ERR_PTR(-ENOMEM);
 
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        tx->priv = priv;
        tx->txq = txp->txq;
        tx->skb = skb;
+       INIT_LIST_HEAD(&tx->txreq.list);
 
        hfi1_ipoib_build_ib_tx_headers(tx, txp);
 
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        ret = hfi1_ipoib_submit_tx(txq, tx);
        if (likely(!ret)) {
+tx_ok:
                trace_sdma_output_ibhdr(tx->priv->dd,
                                        &tx->sdma_hdr.hdr,
                                        ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        txq->pkts_sent = false;
 
-       if (ret == -EBUSY) {
-               list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-               trace_sdma_output_ibhdr(tx->priv->dd,
-                                       &tx->sdma_hdr.hdr,
-                                       ib_is_sc5(txp->flow.sc5));
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
-
-       if (ret == -ECOMM) {
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
+       if (ret == -EBUSY || ret == -ECOMM)
+               goto tx_ok;
 
        sdma_txclean(priv->dd, &tx->txreq);
        dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
        struct ipoib_txreq *tx;
 
        /* Has the flow changed? */
-       if (txq->flow.as_int != txp->flow.as_int)
-               (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+       if (txq->flow.as_int != txp->flow.as_int) {
+               int ret;
+
+               ret = hfi1_ipoib_flush_tx_list(dev, txq);
+               if (unlikely(ret)) {
+                       if (ret == -EBUSY)
+                               ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                        return -EAGAIN;
                }
 
-               netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
-               if (list_empty(&txq->wait.list))
+               if (list_empty(&txreq->list))
+                       /* came from non-list submit */
+                       list_add_tail(&txreq->list, &txq->tx_list);
+               if (list_empty(&txq->wait.list)) {
+                       if (!atomic_xchg(&txq->no_desc, 1))
+                               hfi1_ipoib_stop_txq(txq);
                        iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+               }
 
                write_sequnlock(&sde->waitlock);
                return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
        struct net_device *dev = txq->priv->netdev;
 
        if (likely(dev->reg_state == NETREG_REGISTERED) &&
-           likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
            likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
-               netif_wake_subqueue(dev, txq->q_idx);
+               if (atomic_xchg(&txq->no_desc, 0))
+                       hfi1_ipoib_wake_txq(txq);
 }
 
 int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
                txq->sde = NULL;
                INIT_LIST_HEAD(&txq->tx_list);
                atomic64_set(&txq->complete_txreqs, 0);
+               atomic_set(&txq->stops, 0);
+               atomic_set(&txq->ring_full, 0);
+               atomic_set(&txq->no_desc, 0);
                txq->q_idx = i;
                txq->flow.tx_queue = 0xff;
                txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
                atomic64_inc(complete_txreqs);
        }
 
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+       if (hfi1_ipoib_used(txq))
                dd_dev_warn(txq->priv->dd,
                            "txq %d not empty found %llu requests\n",
                            txq->q_idx,
index 63688e8..6d263c9 100644 (file)
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
 {
        if (dd->dummy_netdev) {
                dd_dev_info(dd, "hfi1 netdev freed\n");
-               free_netdev(dd->dummy_netdev);
+               kfree(dd->dummy_netdev);
                dd->dummy_netdev = NULL;
        }
 }
index bfa6e08..d2d526c 100644 (file)
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        tx->mr = NULL;
        tx->sde = priv->s_sde;
        tx->psc = priv->s_sendcontext;
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        /* Set the header type */
        tx->phdr.hdr.hdr_type = priv->hdr_type;
index a77fa67..479fa55 100644 (file)
@@ -898,13 +898,14 @@ struct hns_roce_hw {
        int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
        void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                        enum ib_mtu mtu);
-       int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
-                         unsigned long mtpt_idx);
+       int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                         struct hns_roce_mr *mr, unsigned long mtpt_idx);
        int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                                struct hns_roce_mr *mr, int flags, u32 pdn,
                                int mr_access_flags, u64 iova, u64 size,
                                void *mb_buf);
-       int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+       int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                              struct hns_roce_mr *mr);
        int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
        void (*write_cqc)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
index d02207c..cf39f56 100644 (file)
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
                   val);
 }
 
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+                                 struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v1_mpt_entry *mpt_entry;
index c597d72..dd01a51 100644 (file)
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_hw_reset_stat(handle);
+       hw_resetting = ops->get_cmdq_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);
 
        if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
        return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+                       struct hns_roce_v2_mpt_entry *mpt_entry,
                        struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
        return 0;
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+                                 void *mb_buf, struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
        if (mr->type == MR_TYPE_DMA)
                return 0;
 
-       ret = set_mtpt_pbl(mpt_entry, mr);
+       ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
 
        return ret;
 }
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                mr->iova = iova;
                mr->size = size;
 
-               ret = set_mtpt_pbl(mpt_entry, mr);
+               ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
        }
 
        return ret;
 }
 
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+                                      void *mb_buf, struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v2_mpt_entry *mpt_entry;
        dma_addr_t pbl_ba = 0;
index 4c0bbb1..0e71ebe 100644 (file)
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
        }
 
        if (mr->type != MR_TYPE_FRMR)
-               ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+               ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+                                            mtpt_idx);
        else
-               ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+               ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
        if (ret) {
                dev_err(dev, "Write mtpt fail!\n");
                goto err_page;
index 81bf6b9..f939c9b 100644 (file)
@@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (!in)
                return -ENOMEM;
 
-       if (MLX5_CAP_GEN(mdev, ece_support))
+       if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
                MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
@@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        unsigned long flags;
        int err;
 
-       if (qp->ibqp.rwq_ind_tbl) {
+       if (qp->is_rss) {
                destroy_rss_raw_qp_tir(dev, qp);
                return;
        }
 
-       base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       base = (qp->type == IB_QPT_RAW_PACKET ||
                qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
-              &qp->raw_packet_qp.rq.base :
-              &qp->trans_qp.base;
+                      &qp->raw_packet_qp.rq.base :
+                      &qp->trans_qp.base;
 
        if (qp->state != IB_QPS_RESET) {
-               if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+               if (qp->type != IB_QPT_RAW_PACKET &&
                    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                        err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
                                                  NULL, &base->mqp, NULL);
@@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                     base->mqp.qpn);
        }
 
-       get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
-               &send_cq, &recv_cq);
+       get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+               &recv_cq);
 
        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        mlx5_ib_unlock_cqs(send_cq, recv_cq);
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
-       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       if (qp->type == IB_QPT_RAW_PACKET ||
            qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                destroy_raw_packet_qp(dev, qp);
        } else {
@@ -2669,6 +2669,9 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                return (create_flags) ? -EINVAL : 0;
 
        process_create_flag(dev, &create_flags,
+                           IB_QP_CREATE_INTEGRITY_EN,
+                           MLX5_CAP_GEN(mdev, sho), qp);
+       process_create_flag(dev, &create_flags,
                            IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
                            MLX5_CAP_GEN(mdev, block_lb_mc), qp);
        process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
@@ -2873,7 +2876,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 static int check_ucmd_data(struct mlx5_ib_dev *dev,
                           struct mlx5_create_qp_params *params)
 {
-       struct ib_qp_init_attr *attr = params->attr;
        struct ib_udata *udata = params->udata;
        size_t size, last;
        int ret;
@@ -2885,14 +2887,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
                 */
                last = sizeof(struct mlx5_ib_create_qp_rss);
        else
-               /* IB_QPT_RAW_PACKET doesn't have ECE data */
-               switch (attr->qp_type) {
-               case IB_QPT_RAW_PACKET:
-                       last = offsetof(struct mlx5_ib_create_qp, ece_options);
-                       break;
-               default:
-                       last = offsetof(struct mlx5_ib_create_qp, reserved);
-               }
+               last = offsetof(struct mlx5_ib_create_qp, reserved);
 
        if (udata->inlen <= last)
                return 0;
@@ -2907,7 +2902,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
        if (!ret)
                mlx5_ib_dbg(
                        dev,
-                       "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+                       "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
                        udata->inlen, params->ucmd_size, last, size);
        return ret ? 0 : -EINVAL;
 }
@@ -3002,10 +2997,18 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
        return &qp->ibqp;
 
 destroy_qp:
-       if (qp->type == MLX5_IB_QPT_DCT)
+       if (qp->type == MLX5_IB_QPT_DCT) {
                mlx5_ib_destroy_dct(qp);
-       else
+       } else {
+               /*
+                * The two lines below are a temporary solution until QP
+                * allocation is moved under IB/core responsibility.
+                */
+               qp->ibqp.send_cq = attr->send_cq;
+               qp->ibqp.recv_cq = attr->recv_cq;
                destroy_qp_common(dev, qp, udata);
+       }
+
        qp = NULL;
 free_qp:
        kfree(qp);
@@ -4162,8 +4165,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
                if (udata->outlen < min_resp_len)
                        return -EINVAL;
-               resp.response_length = min_resp_len;
-
                /*
                 * If we don't have enough space for the ECE options,
                 * simply indicate it with resp.response_length.
@@ -4384,8 +4385,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
                                MLX5_GET(ads, path, src_addr_index),
                                MLX5_GET(ads, path, hop_limit),
                                MLX5_GET(ads, path, tclass));
-               memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
-                      MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+               rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
        }
 }
 
index c19d91d..7c3968e 100644 (file)
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        int ece = 0;
 
        switch (opcode) {
+       case MLX5_CMD_OP_INIT2INIT_QP:
+               ece = MLX5_GET(init2init_qp_out, out, ece);
+               break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                ece = MLX5_GET(init2rtr_qp_out, out, ece);
                break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        case MLX5_CMD_OP_RTS2RTS_QP:
                ece = MLX5_GET(rts2rts_qp_out, out, ece);
                break;
+       case MLX5_CMD_OP_RST2INIT_QP:
+               ece = MLX5_GET(rst2init_qp_out, out, ece);
+               break;
        default:
                break;
        }
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                break;
        default:
                return -EINVAL;
index 792eecd..97fc7dd 100644 (file)
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
        if (params->cm_info) {
                event.ird = params->cm_info->ird;
                event.ord = params->cm_info->ord;
-               event.private_data_len = params->cm_info->private_data_len;
-               event.private_data = (void *)params->cm_info->private_data;
+               /* Only connect_request and reply have valid private data;
+                * for the rest of the events it may be left over from
+                * connection establishment. CONNECT_REQUEST is issued via
+                * qedr_iw_mpa_request.
+                */
+               if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+                       event.private_data_len =
+                               params->cm_info->private_data_len;
+                       event.private_data =
+                               (void *)params->cm_info->private_data;
+               }
        }
 
        if (ep->cm_id)
index 511b728..7db35dd 100644 (file)
@@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = (ERR_PTR(err));
-                       goto bail_driver_priv;
+                       goto bail_rq_rvt;
                }
 
                if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1314,11 @@ bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-       rvt_free_rq(&qp->r_rq);
        free_ud_wq_attr(qp);
 
+bail_rq_rvt:
+       rvt_free_rq(&qp->r_rq);
+
 bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);
 
index 6505202..7271d70 100644 (file)
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
                        break;
 
                bytes = min(bytes, len);
-               if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+               if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+                   bytes) {
                        copied += bytes;
                        offset += bytes;
                        len -= bytes;
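
The (void *)(uintptr_t) double cast above is the portable way to turn an integer-carried address back into a pointer: converting through uintptr_t keeps the conversion well-defined and silences the "cast to pointer from integer of different size" warning on 32-bit builds, where buf_addr is wider than void *. In isolation:

#include <stdint.h>

/* buf_addr arrives as a 64-bit value even on 32-bit hosts. */
static void *addr_to_ptr(uint64_t buf_addr)
{
	return (void *)(uintptr_t)buf_addr;
}
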
index a4ee6b8..b91e472 100644 (file)
@@ -39,8 +39,6 @@
  *     Troy Laramy <t-laramy@ti.com>
  */
 
-#include <asm/cacheflush.h>
-
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/delay.h>
index 10c214b..1ac9aef 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
 
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
index 5d3c691..3dd46cd 100644 (file)
@@ -572,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
        if (data[IFLA_BAREUDP_SRCPORT_MIN])
                conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
 
+       if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
+               conf->multi_proto_mode = true;
+
        return 0;
 }
 
index c7ac63f..946e41f 100644 (file)
@@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        set_bit(0, priv->cfp.used);
        set_bit(0, priv->cfp.unique);
 
+       /* Balance of_node_put() done by of_find_node_by_name() */
+       of_node_get(dn);
        ports = of_find_node_by_name(dn, "ports");
        if (ports) {
                bcm_sf2_identify_ports(priv, ports);
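
The comment added above captures a subtle OF API contract: of_find_node_by_name() drops a reference on its 'from' argument, so a caller that still needs that node afterwards must take an extra reference first. The pattern in isolation:

#include <linux/of.h>

/* of_find_node_by_name() calls of_node_put() on 'dn', so balance it
 * up front when 'dn' must stay valid after the lookup.
 */
static struct device_node *find_ports(struct device_node *dn)
{
	of_node_get(dn);
	return of_find_node_by_name(dn, "ports");
}
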
index bdfd6c4..af35651 100644 (file)
@@ -7,6 +7,165 @@
 
 #define SJA1105_SIZE_VL_STATUS                 8
 
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+                                    struct sja1105_rule *rule,
+                                    u8 gate_state, s64 entry_time,
+                                    struct netlink_ext_ack *extack)
+{
+       struct sja1105_gate_entry *e;
+       int rc;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (!e)
+               return -ENOMEM;
+
+       e->rule = rule;
+       e->gate_state = gate_state;
+       e->interval = entry_time;
+
+       if (list_empty(&gating_cfg->entries)) {
+               list_add(&e->list, &gating_cfg->entries);
+       } else {
+               struct sja1105_gate_entry *p;
+
+               list_for_each_entry(p, &gating_cfg->entries, list) {
+                       if (p->interval == e->interval) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Gate conflict");
+                               rc = -EBUSY;
+                               goto err;
+                       }
+
+                       if (e->interval < p->interval)
+                               break;
+               }
+               list_add(&e->list, p->list.prev);
+       }
+
+       gating_cfg->num_entries++;
+
+       return 0;
+err:
+       kfree(e);
+       return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+                                   u64 cycle_time)
+{
+       struct sja1105_gate_entry *last_e;
+       struct sja1105_gate_entry *e;
+       struct list_head *prev;
+
+       list_for_each_entry(e, &gating_cfg->entries, list) {
+               struct sja1105_gate_entry *p;
+
+               prev = e->list.prev;
+
+               if (prev == &gating_cfg->entries)
+                       continue;
+
+               p = list_entry(prev, struct sja1105_gate_entry, list);
+               p->interval = e->interval - p->interval;
+       }
+       last_e = list_last_entry(&gating_cfg->entries,
+                                struct sja1105_gate_entry, list);
+       last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+       struct sja1105_gate_entry *e, *n;
+
+       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+               list_del(&e->list);
+               kfree(e);
+       }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+                                             struct netlink_ext_ack *extack)
+{
+       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+       struct sja1105_rule *rule;
+       s64 max_cycle_time = 0;
+       s64 its_base_time = 0;
+       int i, rc = 0;
+
+       sja1105_free_gating_config(gating_cfg);
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               if (max_cycle_time < rule->vl.cycle_time) {
+                       max_cycle_time = rule->vl.cycle_time;
+                       its_base_time = rule->vl.base_time;
+               }
+       }
+
+       if (!max_cycle_time)
+               return 0;
+
+       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+               max_cycle_time, its_base_time);
+
+       gating_cfg->base_time = its_base_time;
+       gating_cfg->cycle_time = max_cycle_time;
+       gating_cfg->num_entries = 0;
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               s64 time;
+               s64 rbt;
+
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               /* Calculate the difference between this gating schedule's
+                * base time, and the base time of the gating schedule with the
+                * longest cycle time. We call it the relative base time (rbt).
+                */
+               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+                                      its_base_time);
+               rbt -= its_base_time;
+
+               time = rbt;
+
+               for (i = 0; i < rule->vl.num_entries; i++) {
+                       u8 gate_state = rule->vl.entries[i].gate_state;
+                       s64 entry_time = time;
+
+                       while (entry_time < max_cycle_time) {
+                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
+                                                              gate_state,
+                                                              entry_time,
+                                                              extack);
+                               if (rc)
+                                       goto err;
+
+                               entry_time += rule->vl.cycle_time;
+                       }
+                       time += rule->vl.entries[i].interval;
+               }
+       }
+
+       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+       return 0;
+err:
+       sja1105_free_gating_config(gating_cfg);
+       return rc;
+}
+
 /* The switch flow classification core implements TTEthernet, which 'thinks' in
  * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
  * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
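
sja1105_gating_cfg_time_to_interval() above walks predecessor pairs of the linked list; the same conversion over a sorted array makes the arithmetic easier to see. A minimal sketch:

#include <stdint.h>

/* t[] holds absolute gate action times sorted ascending; on return it
 * holds per-entry intervals. E.g. {0, 5, 10, 15} with cycle_time 20
 * becomes {5, 5, 5, 5}.
 */
static void times_to_intervals(int64_t *t, int n, int64_t cycle_time)
{
	int i;

	for (i = 0; i < n - 1; i++)
		t[i] = t[i + 1] - t[i];	/* t[i + 1] is still an absolute time here */

	t[n - 1] = cycle_time - t[n - 1];	/* last entry closes the cycle */
}
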
@@ -342,7 +501,9 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
@@ -388,171 +549,19 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port,
                kfree(rule);
        }
 
-       rc = sja1105_init_virtual_links(priv, extack);
+       rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                return rc;
 
-       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
-}
-
-/* Insert into the global gate list, sorted by gate action time. */
-static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
-                                    struct sja1105_rule *rule,
-                                    u8 gate_state, s64 entry_time,
-                                    struct netlink_ext_ack *extack)
-{
-       struct sja1105_gate_entry *e;
-       int rc;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (!e)
-               return -ENOMEM;
-
-       e->rule = rule;
-       e->gate_state = gate_state;
-       e->interval = entry_time;
-
-       if (list_empty(&gating_cfg->entries)) {
-               list_add(&e->list, &gating_cfg->entries);
-       } else {
-               struct sja1105_gate_entry *p;
-
-               list_for_each_entry(p, &gating_cfg->entries, list) {
-                       if (p->interval == e->interval) {
-                               NL_SET_ERR_MSG_MOD(extack,
-                                                  "Gate conflict");
-                               rc = -EBUSY;
-                               goto err;
-                       }
-
-                       if (e->interval < p->interval)
-                               break;
-               }
-               list_add(&e->list, p->list.prev);
-       }
-
-       gating_cfg->num_entries++;
-
-       return 0;
-err:
-       kfree(e);
-       return rc;
-}
-
-/* The gate entries contain absolute times in their e->interval field. Convert
- * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
- */
-static void
-sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
-                                   u64 cycle_time)
-{
-       struct sja1105_gate_entry *last_e;
-       struct sja1105_gate_entry *e;
-       struct list_head *prev;
-
-       list_for_each_entry(e, &gating_cfg->entries, list) {
-               struct sja1105_gate_entry *p;
-
-               prev = e->list.prev;
-
-               if (prev == &gating_cfg->entries)
-                       continue;
-
-               p = list_entry(prev, struct sja1105_gate_entry, list);
-               p->interval = e->interval - p->interval;
-       }
-       last_e = list_last_entry(&gating_cfg->entries,
-                                struct sja1105_gate_entry, list);
-       if (last_e->list.prev != &gating_cfg->entries)
-               last_e->interval = cycle_time - last_e->interval;
-}
-
-static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
-{
-       struct sja1105_gate_entry *e, *n;
-
-       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
-               list_del(&e->list);
-               kfree(e);
-       }
-}
-
-static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
-                                             struct netlink_ext_ack *extack)
-{
-       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
-       struct sja1105_rule *rule;
-       s64 max_cycle_time = 0;
-       s64 its_base_time = 0;
-       int i, rc = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               if (max_cycle_time < rule->vl.cycle_time) {
-                       max_cycle_time = rule->vl.cycle_time;
-                       its_base_time = rule->vl.base_time;
-               }
-       }
-
-       if (!max_cycle_time)
-               return 0;
-
-       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
-               max_cycle_time, its_base_time);
-
-       sja1105_free_gating_config(gating_cfg);
-
-       gating_cfg->base_time = its_base_time;
-       gating_cfg->cycle_time = max_cycle_time;
-       gating_cfg->num_entries = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               s64 time;
-               s64 rbt;
-
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               /* Calculate the difference between this gating schedule's
-                * base time, and the base time of the gating schedule with the
-                * longest cycle time. We call it the relative base time (rbt).
-                */
-               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
-                                      its_base_time);
-               rbt -= its_base_time;
-
-               time = rbt;
-
-               for (i = 0; i < rule->vl.num_entries; i++) {
-                       u8 gate_state = rule->vl.entries[i].gate_state;
-                       s64 entry_time = time;
-
-                       while (entry_time < max_cycle_time) {
-                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
-                                                              gate_state,
-                                                              entry_time,
-                                                              extack);
-                               if (rc)
-                                       goto err;
-
-                               entry_time += rule->vl.cycle_time;
-                       }
-                       time += rule->vl.entries[i].interval;
-               }
-       }
+       rc = sja1105_init_virtual_links(priv, extack);
+       if (rc)
+               return rc;
 
-       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+       rc = sja1105_init_scheduling(priv);
+       if (rc < 0)
+               return rc;
 
-       return 0;
-err:
-       sja1105_free_gating_config(gating_cfg);
-       return rc;
+       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
 }
 
 int sja1105_vl_gate(struct sja1105_private *priv, int port,
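
The relative base time computed in sja1105_compose_gating_subschedule() is worth a worked example. Assuming future_base_time() returns the first occurrence of a schedule's base time at or after a reference instant (the usual helper for cyclic schedules), a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* First time >= ref that equals base + k * cycle for integer k >= 0. */
static int64_t future_base_time(int64_t base, int64_t cycle, int64_t ref)
{
	int64_t n;

	if (base >= ref)
		return base;
	n = (ref - base + cycle - 1) / cycle;	/* ceiling division */
	return base + n * cycle;
}

int main(void)
{
	int64_t its_base = 1000, max_cycle = 200;	/* longest schedule */
	int64_t base = 990, cycle = 50;			/* shorter schedule */
	int64_t rbt, t;

	rbt = future_base_time(base, cycle, its_base) - its_base;	/* 40 */

	/* Replicate the short schedule's entry across the long cycle. */
	for (t = rbt; t < max_cycle; t += cycle)
		printf("gate entry at offset %lld\n", (long long)t);	/* 40 90 140 190 */

	return 0;
}
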
@@ -588,14 +597,12 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
 
        if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
            key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
-               dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
-               dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
index b93e05f..6a884df 100644 (file)
@@ -6292,6 +6292,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
 
 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 {
+       struct hwrm_stat_ctx_clr_stats_input req0 = {0};
        struct hwrm_stat_ctx_free_input req = {0};
        int i;
 
@@ -6301,6 +6302,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return;
 
+       bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
 
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -6310,7 +6312,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 
                if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
                        req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+                       if (BNXT_FW_MAJ(bp) <= 20) {
+                               req0.stat_ctx_id = req.stat_ctx_id;
+                               _hwrm_send_message(bp, &req0, sizeof(req0),
+                                                  HWRM_CMD_TIMEOUT);
+                       }
                        _hwrm_send_message(bp, &req, sizeof(req),
                                           HWRM_CMD_TIMEOUT);
 
@@ -6976,7 +6982,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
 
        bp->tx_push_thresh = 0;
-       if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+       if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+           BNXT_FW_MAJ(bp) > 217)
                bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
 
        hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7240,8 +7247,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
 static int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 fw_maj, fw_min, fw_bld, fw_rsv;
        u32 dev_caps_cfg, hwrm_ver;
-       int rc;
+       int rc, len;
 
        bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -7273,9 +7281,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
                         resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                         resp->hwrm_intf_upd_8b);
 
-       snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
-                resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
-                resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+       fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+       if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+               fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+               fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+               fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+               len = FW_VER_STR_LEN;
+       } else {
+               fw_maj = resp->hwrm_fw_maj_8b;
+               fw_min = resp->hwrm_fw_min_8b;
+               fw_bld = resp->hwrm_fw_bld_8b;
+               fw_rsv = resp->hwrm_fw_rsvd_8b;
+               len = BC_HWRM_STR_LEN;
+       }
+       bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+       snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+                fw_rsv);
 
        if (strlen(resp->active_pkg_name)) {
                int fw_ver_len = strlen(bp->fw_ver_str);
@@ -11892,7 +11913,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->ethtool_ops = &bnxt_ethtool_ops;
        pci_set_drvdata(pdev, dev);
 
-       bnxt_vpd_read_info(bp);
+       if (BNXT_PF(bp))
+               bnxt_vpd_read_info(bp);
 
        rc = bnxt_alloc_hwrm_resources(bp);
        if (rc)
index 9e173d7..78e2fd6 100644
@@ -1746,6 +1746,11 @@ struct bnxt {
 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
        char                    fw_ver_str[FW_VER_STR_LEN];
        char                    hwrm_ver_supp[FW_VER_STR_LEN];
+       u64                     fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv)                   \
+       ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp)                ((bp)->fw_ver_code >> 48)
+
        __be16                  vxlan_port;
        u8                      vxlan_port_cnt;
        __le16                  vxlan_fw_dst_port_id;
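
Packing the four 16-bit version components into one u64 makes firmware-version checks plain integer comparisons, as in the BNXT_FW_MAJ(bp) <= 20 and BNXT_FW_MAJ(bp) > 217 gates above. A minimal standalone sketch of the packing, with the macros rewritten over a bare u64 instead of struct bnxt and a hypothetical version value:

#include <stdint.h>
#include <stdio.h>

#define FW_VER_CODE(maj, min, bld, rsv)				\
	((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 |	\
	 (uint64_t)(bld) << 16 | (rsv))
#define FW_MAJ(code)	((code) >> 48)

int main(void)
{
	/* hypothetical firmware version 218.0.1.0 */
	uint64_t code = FW_VER_CODE(218, 0, 1, 0);

	printf("maj=%llu push_mode_ok=%d\n",
	       (unsigned long long)FW_MAJ(code),
	       FW_MAJ(code) > 217);	/* maj=218 push_mode_ok=1 */
	return 0;
}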
index 0eef4f5..4a11c1e 100644
@@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
 }
 
 static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
-                                   struct flow_block_offload *f)
+                                   struct flow_block_offload *f, void *data,
+                                   void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
        struct flow_block_cb *block_cb;
@@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                cb_priv->bp = bp;
                list_add(&cb_priv->list, &bp->tc_indr_block_list);
 
-               block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              bnxt_tc_setup_indr_rel);
+               block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   bnxt_tc_setup_indr_rel, f,
+                                                   netdev, data, bp, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                break;
        default:
@@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
 }
 
 static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
-                                enum tc_setup_type type, void *type_data)
+                                enum tc_setup_type type, void *type_data,
+                                void *data,
+                                void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!bnxt_is_netdev_indr_offload(netdev))
                return -EOPNOTSUPP;
 
        switch (type) {
        case TC_SETUP_BLOCK:
-               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
+                                               cleanup);
        default:
                break;
        }
@@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
                return;
 
        flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
-                                bnxt_tc_setup_indr_block_cb);
+                                bnxt_tc_setup_indr_rel);
        rhashtable_destroy(&tc_info->flow_table);
        rhashtable_destroy(&tc_info->l2_table);
        rhashtable_destroy(&tc_info->decap_l2_table);
index ff31da0..af924a8 100644
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                        genet_dma_ring_regs[r]);
 }
 
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
-                                          u32 f_index)
-{
-       u32 offset;
-       u32 reg;
-
-       offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
-       reg = bcmgenet_hfb_reg_readl(priv, offset);
-       return !!(reg & (1 << (f_index % 32)));
-}
-
 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
 {
        u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
        bcmgenet_hfb_reg_writel(priv, reg, offset);
 }
 
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
-       u32 f_index;
-
-       /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
-       for (f_index = MAX_NUM_OF_FS_RULES;
-            f_index < priv->hw_params->hfb_filter_cnt; f_index++)
-               if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
-                       return f_index;
-
-       return -ENOMEM;
-}
-
 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
 {
        while (size) {
@@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 {
        struct ethtool_rx_flow_spec *fs = &rule->fs;
        int err = 0, offset = 0, f_length = 0;
-       u16 val_16, mask_16;
        u8 val_8, mask_8;
+       __be16 val_16;
+       u16 mask_16;
        size_t size;
        u32 *f_data;
 
@@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
        return err;
 }
 
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit  19    - nibble 0 match enable
- * bit  18    - nibble 1 match enable
- * bit  17    - nibble 2 match enable
- * bit  16    - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8  - nibble 1 data
- * bits 7:4   - nibble 2 data
- * bits 3:0   - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
-                           u32 f_length, u32 rx_queue)
-{
-       int f_index;
-
-       f_index = bcmgenet_hfb_find_unused_filter(priv);
-       if (f_index < 0)
-               return -ENOMEM;
-
-       if (f_length > priv->hw_params->hfb_filter_size)
-               return -EINVAL;
-
-       bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
-       bcmgenet_hfb_enable_filter(priv, f_index);
-
-       return 0;
-}
-
 /* bcmgenet_hfb_clear
  *
  * Clear Hardware Filter Block and disable all filtering.
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       if (skb_padto(skb, ETH_ZLEN)) {
-               ret = NETDEV_TX_OK;
-               goto out;
-       }
-
        /* Retain how many bytes will be sent on the wire, without TSB inserted
         * by transmit checksum offload
         */
@@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                len_stat = (size << DMA_BUFLENGTH_SHIFT) |
                           (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
 
+               /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+                * will need to restore software padding of "runt" packets
+                */
                if (!i) {
                        len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
index 7a3b22b..ebff1fc 100644
@@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We probably don't have netdev yet */
-       if (!netdev || !netif_running(netdev))
+       /* Could be second call or maybe we don't have netdev yet */
+       if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;
 
        /* We needn't recover from permanent error */
index 6793307..52582e8 100644
@@ -2558,7 +2558,7 @@ static int macb_open(struct net_device *dev)
 
        err = macb_phylink_connect(bp);
        if (err)
-               goto napi_exit;
+               goto reset_hw;
 
        netif_tx_start_all_queues(dev);
 
@@ -2567,9 +2567,11 @@ static int macb_open(struct net_device *dev)
 
        return 0;
 
-napi_exit:
+reset_hw:
+       macb_reset_hw(bp);
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
                napi_disable(&queue->napi);
+       macb_free_consistent(bp);
 pm_exit:
        pm_runtime_put_sync(&bp->pdev->dev);
        return err;
@@ -3760,15 +3762,9 @@ static int macb_init(struct platform_device *pdev)
 
 static struct sifive_fu540_macb_mgmt *mgmt;
 
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
 {
-       struct macb *lp = netdev_priv(dev);
        struct macb_queue *q = &lp->queues[0];
-       struct macb_dma_desc *desc;
-       dma_addr_t addr;
-       u32 ctl;
-       int i;
 
        q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
@@ -3790,6 +3786,43 @@ static int at91ether_start(struct net_device *dev)
                return -ENOMEM;
        }
 
+       return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+
+       if (q->rx_ring) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 macb_dma_desc_get_size(lp),
+                                 q->rx_ring, q->rx_ring_dma);
+               q->rx_ring = NULL;
+       }
+
+       if (q->rx_buffers) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 AT91ETHER_MAX_RBUFF_SZ,
+                                 q->rx_buffers, q->rx_buffers_dma);
+               q->rx_buffers = NULL;
+       }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+       struct macb_dma_desc *desc;
+       dma_addr_t addr;
+       u32 ctl;
+       int i, ret;
+
+       ret = at91ether_alloc_coherent(lp);
+       if (ret)
+               return ret;
+
        addr = q->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
                desc = macb_rx_desc(q, i);
@@ -3811,9 +3844,39 @@ static int at91ether_start(struct net_device *dev)
        ctl = macb_readl(lp, NCR);
        macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
+       /* Enable MAC interrupts */
+       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
        return 0;
 }
 
+static void at91ether_stop(struct macb *lp)
+{
+       u32 ctl;
+
+       /* Disable MAC interrupts */
+       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       /* Disable Receiver and Transmitter */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+       /* Free resources. */
+       at91ether_free_coherent(lp);
+}
+
 /* Open the ethernet interface */
 static int at91ether_open(struct net_device *dev)
 {
@@ -3833,63 +3896,36 @@ static int at91ether_open(struct net_device *dev)
 
        macb_set_hwaddr(lp);
 
-       ret = at91ether_start(dev);
+       ret = at91ether_start(lp);
        if (ret)
-               return ret;
-
-       /* Enable MAC interrupts */
-       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
+               goto pm_exit;
 
        ret = macb_phylink_connect(lp);
        if (ret)
-               return ret;
+               goto stop;
 
        netif_start_queue(dev);
 
        return 0;
+
+stop:
+       at91ether_stop(lp);
+pm_exit:
+       pm_runtime_put_sync(&lp->pdev->dev);
+       return ret;
 }
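
at91ether_open() now unwinds in reverse order of setup: a phylink connect failure tears down what at91ether_start() set up, and both failure paths drop the runtime-PM reference. The same goto-ladder idiom in a self-contained sketch (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int open_device(void)
{
	char *ring, *bufs;
	int ret;

	ring = malloc(64);
	if (!ring)
		return -1;

	bufs = malloc(64);
	if (!bufs) {
		ret = -1;
		goto free_ring;		/* unwind only what succeeded */
	}

	ret = -1;			/* pretend the PHY connect failed */
	if (ret)
		goto free_bufs;

	return 0;

free_bufs:
	free(bufs);
free_ring:
	free(ring);
	return ret;
}

int main(void)
{
	printf("%d\n", open_device());	/* -1, with both buffers freed */
	return 0;
}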
 
 /* Close the interface */
 static int at91ether_close(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
-       struct macb_queue *q = &lp->queues[0];
-       u32 ctl;
-
-       /* Disable Receiver and Transmitter */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-       /* Disable MAC interrupts */
-       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
 
        netif_stop_queue(dev);
 
        phylink_stop(lp->phylink);
        phylink_disconnect_phy(lp->phylink);
 
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR *
-                         macb_dma_desc_get_size(lp),
-                         q->rx_ring, q->rx_ring_dma);
-       q->rx_ring = NULL;
-
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-                         q->rx_buffers, q->rx_buffers_dma);
-       q->rx_buffers = NULL;
+       at91ether_stop(lp);
 
        return pm_runtime_put(&lp->pdev->dev);
 }
index 7b9cd69..d8ab8e3 100644
@@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
        u8 mem_type[CTXT_INGRESS + 1] = { 0 };
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ch_cntxt *buff;
-       u64 *dst_off, *src_off;
        u8 *ctx_buf;
        u8 i, k;
        int rc;
@@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
                }
 
                for (j = 0; j < max_ctx_qid; j++) {
+                       __be64 *dst_off;
+                       u64 *src_off;
+
                        src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
-                       dst_off = (u64 *)buff->data;
+                       dst_off = (__be64 *)buff->data;
 
                        /* The data is stored in 64-bit cpu order.  Convert it
                         * to big endian before parsing.
index d3c654b..80c6627 100644
@@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val)
               ((val & 0x02) << 5) |
               ((val & 0x01) << 7);
 }
+
+extern const char * const dcb_ver_array[];
+
 #define CXGB4_DCB_ENABLED true
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index 8284992..b477b88 100644
@@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
 };
 
 #ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
 
 /* Data Center Bridging information for each port.
  */
index 9fd4967..f27be11 100644
@@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 /**
  *     lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
  *     capabilities
- *     @et_lmm: ethtool Link Mode Mask
+ *     @link_mode_mask: ethtool Link Mode Mask
  *
  *     Translate ethtool Link Mode Mask into a Firmware Port capabilities
  *     value.
index 7965552..7a7f61a 100644
@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                           unsigned int tid, bool dip, bool sip, bool dp,
                           bool sp)
 {
+       u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+       u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
        if (dip) {
                if (f->fs.type) {
                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
        }
 
        set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
-                     (dp ? f->fs.nat_lport : 0) |
-                     (sp ? f->fs.nat_fport << 16 : 0), 1);
+                     (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+                     1);
 }
 
 /* Validate filter spec against configuration done on the card. */
@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
        fwr->fpm = htons(f->fs.mask.fport);
 
        if (adapter->params.filter2_wr_support) {
+               u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+               u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
                fwr->natmode_to_ulp_type =
                        FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
                        FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
-               fwr->newlport = htons(f->fs.nat_lport);
-               fwr->newfport = htons(f->fs.nat_fport);
+               fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+               fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
        }
 
        /* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
                struct in_addr *addr;
 
                addr = (struct in_addr *)ipmask;
-               if (addr->s_addr == 0xffffffff)
+               if (ntohl(addr->s_addr) == 0xffffffff)
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;
 
                addr6 = (struct in6_addr *)ipmask;
-               if (addr6->s6_addr32[0] == 0xffffffff &&
-                   addr6->s6_addr32[1] == 0xffffffff &&
-                   addr6->s6_addr32[2] == 0xffffffff &&
-                   addr6->s6_addr32[3] == 0xffffffff)
+               if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
+                   ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
+                   ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
+                   ntohl(addr6->s6_addr32[3]) == 0xffffffff)
                        return true;
        }
        return false;
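
An all-ones mask has the same representation in either byte order, so the added ntohl() calls do not change behavior; they make the comparison type-correct, since the s_addr fields are big-endian __be32 while the constant is host-order, which is exactly what tools like sparse flag. A tiny sketch:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_all_ones(uint32_t be_mask)
{
	/* convert wire order to host order before comparing against a
	 * host-order constant; for 0xffffffff both orders agree anyway
	 */
	return ntohl(be_mask) == 0xffffffff;
}

int main(void)
{
	printf("%d %d\n",
	       is_all_ones(htonl(0xffffffff)),	/* 1 */
	       is_all_ones(htonl(0xffffff00)));	/* 0 */
	return 0;
}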
index 854b171..0329a6b 100644
@@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
  *                or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: the destination to store the new SMT index.
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t)
  *     @stid: the server TID
  *     @sip: local IP address to bind server to
  *     @sport: the server's TCP port
+ *     @vlan: the VLAN header information
  *     @queue: queue to direct messages from this server to
  *
  *     Create an IP server for the given port and address.
@@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 
        /* Clear out filter specifications */
        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
-       f->fs.val.lport = cpu_to_be16(sport);
+       f->fs.val.lport = be16_to_cpu(sport);
        f->fs.mask.lport  = ~0;
        val = (u8 *)&sip;
        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
-       u32 i, n10g = 0, qidx = 0, n1g = 0;
        u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
+       u32 i, n10g = 0, qidx = 0;
        u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
@@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap)
        if (n10g)
                q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
 
-       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
@@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap)
        else
                q10g = max(8U, q10g);
 
-       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+       while ((q10g * n10g) >
+              (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
                q10g--;
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index f5bc996..70dbee8 100644
@@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
 }
 
 /**
+ * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
  * @ptp: ptp clock structure
  * @ppb: Desired frequency change in parts per billion
  *
@@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
 /**
  * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
  * @delta: Desired change in nanoseconds
  *
  * Adjust the timer by resetting the timecounter structure.
index 4a5fa9e..59b65d4 100644
@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
-       PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
-       PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
 };
 
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
@@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                struct flow_match_ports match;
 
                flow_rule_match_ports(rule, &match);
-               fs->val.lport = cpu_to_be16(match.key->dst);
-               fs->mask.lport = cpu_to_be16(match.mask->dst);
-               fs->val.fport = cpu_to_be16(match.key->src);
-               fs->mask.fport = cpu_to_be16(match.mask->src);
+               fs->val.lport = be16_to_cpu(match.key->dst);
+               fs->mask.lport = be16_to_cpu(match.mask->dst);
+               fs->val.fport = be16_to_cpu(match.key->src);
+               fs->mask.fport = be16_to_cpu(match.mask->src);
 
                /* also initialize nat_lport/fport to same values */
-               fs->nat_lport = cpu_to_be16(match.key->dst);
-               fs->nat_fport = cpu_to_be16(match.key->src);
+               fs->nat_lport = fs->val.lport;
+               fs->nat_fport = fs->val.fport;
        }
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
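
match.key->dst and the other port fields arrive as __be16 wire-order values, so filling the CPU-order filter fields needs be16_to_cpu(), not cpu_to_be16(); the two directions only coincide on big-endian hosts, where both are the identity. A one-line sketch of the correct direction:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a __be16 port as a flow match would deliver it */
	uint16_t wire_dst = htons(443);

	/* wire (big-endian) to CPU order: prints 443 on any host */
	printf("lport=%u\n", ntohs(wire_dst));
	return 0;
}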
@@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             TCP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), TCP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
@@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             UDP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), UDP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
index 3f3c11e..dede025 100644
@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
                             bool next_header)
 {
        unsigned int i, j;
-       u32 val, mask;
+       __be32 val, mask;
        int off, err;
        bool found;
 
@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                const struct cxgb4_next_header *next;
                bool found = false;
                unsigned int i, j;
-               u32 val, mask;
+               __be32 val, mask;
                int off;
 
                if (t->table[link_uhtid - 1].link_handle) {
@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 
                /* Try to find matches that allow jumps to next header. */
                for (i = 0; next[i].jump; i++) {
-                       if (next[i].offoff != cls->knode.sel->offoff ||
-                           next[i].shift != cls->knode.sel->offshift ||
-                           next[i].mask != cls->knode.sel->offmask ||
-                           next[i].offset != cls->knode.sel->off)
+                       if (next[i].sel.offoff != cls->knode.sel->offoff ||
+                           next[i].sel.offshift != cls->knode.sel->offshift ||
+                           next[i].sel.offmask != cls->knode.sel->offmask ||
+                           next[i].sel.off != cls->knode.sel->off)
                                continue;
 
                        /* Found a possible candidate.  Find a key that
@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                                val = cls->knode.sel->keys[j].val;
                                mask = cls->knode.sel->keys[j].mask;
 
-                               if (next[i].match_off == off &&
-                                   next[i].match_val == val &&
-                                   next[i].match_mask == mask) {
+                               if (next[i].key.off == off &&
+                                   next[i].key.val == val &&
+                                   next[i].key.mask == mask) {
                                        found = true;
                                        break;
                                }
index 125868c..f59dd4b 100644
 struct cxgb4_match_field {
        int off; /* Offset from the beginning of the header to match */
        /* Fill the value/mask pair in the spec if matched */
-       int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+       int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
 };
 
 /* IPv4 match fields */
 static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
-                                      u32 val, u32 mask)
+                                      __be32 val, __be32 mask)
 {
        u32 mask_val;
        u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
 
 /* IPv6 match fields */
 static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 20) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 8) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
 
 /* TCP/UDP match */
 static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.fport  = ntohl(val)  >> 16;
        f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
 };
 
 struct cxgb4_next_header {
-       unsigned int offset; /* Offset to next header */
-       /* offset, shift, and mask added to offset above
+       /* Offset, shift, and mask added to beginning of the header
         * to get to next header.  Useful when using a header
         * field's value to jump to next header such as IHL field
         * in IPv4 header.
         */
-       unsigned int offoff;
-       u32 shift;
-       u32 mask;
-       /* match criteria to make this jump */
-       unsigned int match_off;
-       u32 match_val;
-       u32 match_mask;
+       struct tc_u32_sel sel;
+       struct tc_u32_key key;
        /* location of jump to make */
        const struct cxgb4_match_field *jump;
 };
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
  * IPv4 header.
  */
 static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00060000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00110000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
  * to get to transport layer header.
  */
 static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00000600),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00001100),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 struct cxgb4_link {
index 72b37a6..c486412 100644
@@ -503,40 +503,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 EXPORT_SYMBOL(cxgb4_select_ntuple);
 
 /*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head.  If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
-       struct sk_buff *skb;
-
-       while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
-               const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
-               spin_unlock(&e->lock);
-               if (cb->arp_err_handler)
-                       cb->arp_err_handler(cb->handle, skb);
-               else
-                       t4_ofld_send(adap, skb);
-               spin_lock(&e->lock);
-       }
-}
-
-/*
  * Called when the host's neighbor layer makes a change to some entry that is
  * loaded into the HW L2 table.
  */
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 {
-       struct l2t_entry *e;
-       struct sk_buff_head *arpq = NULL;
-       struct l2t_data *d = adap->l2t;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(d, addr, addr_len, ifidx);
+       int hash, ifidx = neigh->dev->ifindex;
+       struct sk_buff_head *arpq = NULL;
+       struct l2t_data *d = adap->l2t;
+       struct l2t_entry *e;
 
+       hash = addr_hash(d, addr, addr_len, ifidx);
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
                        write_l2e(adap, e, 0);
        }
 
-       if (arpq)
-               handle_failed_resolution(adap, e);
+       if (arpq) {
+               struct sk_buff *skb;
+
+               /* Called when address resolution fails for an L2T
+                * entry to handle packets on the arpq head. If a
+                * packet specifies a failure handler it is invoked,
+                * otherwise the packet is sent to the device.
+                */
+               while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+                       const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+                       spin_unlock(&e->lock);
+                       if (cb->arp_err_handler)
+                               cb->arp_err_handler(cb->handle, skb);
+                       else
+                               t4_ofld_send(adap, skb);
+                       spin_lock(&e->lock);
+               }
+       }
        spin_unlock_bh(&e->lock);
 }
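
Folding handle_failed_resolution() into its only caller keeps the locking visible in one place: each queued skb is dequeued with e->lock held, but the lock is dropped around the error handler or t4_ofld_send() call and retaken before the next dequeue. A minimal pthread sketch of that drop-around-callback idiom (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[] = { 1, 2, 3 };
static int head;

static int dequeue(void)		/* caller must hold lock */
{
	return head < 3 ? queue[head++] : 0;
}

static void handler(int item)		/* may block; must run unlocked */
{
	printf("handled %d\n", item);
}

int main(void)
{
	int item;

	pthread_mutex_lock(&lock);
	while ((item = dequeue()) != 0) {
		pthread_mutex_unlock(&lock);	/* drop around the callback */
		handler(item);
		pthread_mutex_lock(&lock);	/* retake before next dequeue */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}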
 
@@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
 }
 
 /**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
  * @dev: net_device pointer
  * @vlan: VLAN Id
  * @port: Associated port
index fde93c5..a1b1446 100644
@@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
 /**
  * cxgb4_sched_class_free - free a scheduling class
  * @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
  *
  * Frees a scheduling class if there are no users.
  */
index 1359158..32a45dc 100644
@@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
 
 /**
  *     free_tx_desc - reclaims Tx descriptors and their buffers
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @q: the Tx queue to reclaim descriptors from
  *     @n: the number of descriptors to reclaim
  *     @unmap: whether the buffers should be unmapped for DMA
@@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
 /**
  *     is_eth_imm - can an Ethernet packet be sent as immediate data?
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns whether an Ethernet packet is small enough to fit as
  *     immediate data. Return value corresponds to headroom required.
@@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 /**
  *     calc_tx_flits - calculate the number of flits for a packet Tx WR
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of flits needed for a Tx WR for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 /**
  *     calc_tx_descs - calculate the number of Tx descriptors for a packet
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of Tx descriptors needed for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        qidx = skb_get_queue_mapping(skb);
        if (ptp_enabled) {
-               spin_lock(&adap->ptp_lock);
                if (!(adap->ptp_tx_skb)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        adap->ptp_tx_skb = skb_get(skb);
                } else {
-                       spin_unlock(&adap->ptp_lock);
                        goto out_free;
                }
                q = &adap->sge.ptptxq;
@@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
        ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
-       if (unlikely(ret == -ENOTSUPP)) {
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
+       if (unlikely(ret == -EOPNOTSUPP))
                goto out_free;
-       }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
        chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake!\n",
                        dev->name, qidx);
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                return NETDEV_TX_BUSY;
        }
 
@@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                goto out_free;
        }
 
@@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (iph->version == 4) {
                                iph->check = 0;
                                iph->tot_len = 0;
-                               iph->check = (u16)(~ip_fast_csum((u8 *)iph,
-                                                                iph->ihl));
+                               iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                cntrl = hwcsum(adap->params.chip, skb);
@@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        txq_advance(&q->q, ndesc);
 
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
-       if (ptp_enabled)
-               spin_unlock(&adap->ptp_lock);
        return NETDEV_TX_OK;
 
 out_free:
@@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(qid >= pi->nqsets))
                return cxgb4_ethofld_xmit(skb, dev);
 
+       if (is_ptp_enabled(skb, dev)) {
+               struct adapter *adap = netdev2adap(dev);
+               netdev_tx_t ret;
+
+               spin_lock(&adap->ptp_lock);
+               ret = cxgb4_eth_xmit(skb, dev);
+               spin_unlock(&adap->ptp_lock);
+               return ret;
+       }
+
        return cxgb4_eth_xmit(skb, dev);
 }
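
Taking ptp_lock here, in the t4_start_xmit() wrapper, is what lets all the spin_unlock(&adap->ptp_lock) calls disappear from the error paths of cxgb4_eth_xmit() above: the inner function no longer knows about the lock at all. The shape of that pattern in a standalone pthread sketch (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptp_lock = PTHREAD_MUTEX_INITIALIZER;

/* inner path: early returns need no unlock calls anymore */
static int eth_xmit(int pkt)
{
	if (pkt < 0)
		return -1;
	return 0;
}

/* wrapper owns the lock across the whole inner call */
static int start_xmit(int pkt, int ptp_enabled)
{
	int ret;

	if (!ptp_enabled)
		return eth_xmit(pkt);

	pthread_mutex_lock(&ptp_lock);
	ret = eth_xmit(pkt);
	pthread_mutex_unlock(&ptp_lock);
	return ret;
}

int main(void)
{
	printf("%d\n", start_xmit(1, 1));	/* 0 */
	return 0;
}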
 
@@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
 
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
  *
  * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
  * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
@@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 
 /**
  *     txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- *     @adap: the adapter
  *     @q: the queue to stop
  *
  *     Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
@@ -3286,7 +3286,7 @@ enum {
 
 /**
  *     t4_systim_to_hwstamp - read hardware time stamp
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @skb: the packet
  *
  *     Read Time Stamp from MPS packet and insert in skb which
@@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
 
        hwtstamps = skb_hwtstamps(skb);
        memset(hwtstamps, 0, sizeof(*hwtstamps));
-       hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+       hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
 
        return RX_PTP_PKT_SUC;
 }
 
 /**
  *     t4_rx_hststamp - Recv PTP Event Message
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @rsp: the response queue descriptor holding the RX_PKT message
+ *     @rxq: the response queue holding the RX_PKT message
  *     @skb: the packet
  *
  *     PTP enabled and MPS packet, read HW timestamp
@@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
 
 /**
  *      t4_tx_hststamp - Loopback PTP Transmit Event Message
- *      @adap: the adapter
+ *      @adapter: the adapter
  *      @skb: the packet
  *      @dev: the ingress net device
  *
index 01c65d1..cbe72ed 100644
@@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e)
 }
 
 /**
+ * cxgb4_smt_release - Release SMT entry
  * @e: smt entry to release
  *
  * Releases ref count and frees up an smt entry from SMT table
@@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
 }
 
 /**
+ * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
  * @dev: net_device pointer
  * @smac: MAC address to add to SMT
  * Returns pointer to the SMT entry created
index 1c8068c..1aa6dc1 100644
@@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 
 /**
  *     t4_get_exprom_version - return the Expansion ROM version (if any)
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @vers: where to place the version
  *
  *     Reads the Expansion ROM header from FLASH and returns the version
@@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap)
  * @cmd: TP fw ldst address space type
  * @vals: where the indirect register values are stored/written
  * @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
  * @rw: Read (1) or Write (0)
  * @sleep_ok: if true we may sleep while awaiting command completion
  *
@@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 
 /**
  *     compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @pidx: the port index
  *
  *     Computes and returns a bitmap indicating which MPS buffer groups are
@@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
 
 /**
  *     t4_get_tp_ch_map - return TP ingress channels associated with a port
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @pidx: the port index
  *
  *     Returns a bitmap indicating which TP Ingress Channels are associated
@@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  *     @phy_addr: the PHY address
  *     @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  *     @reg: the register to write
- *     @valp: value to write
+ *     @val: value to write
  *
  *     Issues a FW command through the given mailbox to write a PHY register.
  */
@@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 
 /**
  *     t4_sge_decode_idma_state - decode the idma state
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @state: the state idma is stuck in
  */
 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
- *      @ctx_type: Egress or Ingress
+ *      @ctxt_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
@@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 
 /**
  *     t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- *     @adap - the adapter
+ *     @adap: the adapter
  *     @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
  *     @dbqtimers: SGE Doorbell Queue Timer table
  *
@@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 /**
  *     t4_fw_restart - restart the firmware by taking the uP out of RESET
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @reset: if we want to do a RESET to restart things
  *
  *     Restart firmware previously halted by t4_fw_halt().  On successful
@@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *     @nmac: number of MAC addresses needed (1 to 5)
  *     @mac: the MAC addresses of the VI
  *     @rss_size: size of RSS table slice associated with this VI
+ *     @vivld: the destination to store the VI Valid value.
+ *     @vin: the destination to store the VIN value.
  *
  *     Allocates a virtual interface for the given physical port.  If @mac is
  *     not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
  *      @adap: the adapter
  *      @viid: the VI id
- *      @mac: the MAC address
+ *      @addr: the MAC address
  *      @mask: the mask
  *      @vni: the VNI id for the tunnel protocol
  *      @vni_mask: mask for the VNI id
@@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
  *     t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  *     @adap: the adapter
  *     @viid: the VI id
- *     @mac: the MAC address
+ *     @addr: the MAC address
  *     @mask: the mask
  *     @idx: index at which to add this entry
- *     @port_id: the port index
  *     @lookup_type: MAC address for inner (1) or outer (0) header
+ *     @port_id: the port index
  *     @sleep_ok: call is allowed to sleep
  *
  *     Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  *     @idx: index of existing filter for old value of MAC address, or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: the destination to store the new SMT index.
  *
  *     Modifies an exact-match filter and sets it to the new MAC address.
  *     Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 
 /**
  *     t4_link_down_rc_str - return a string for a Link Down Reason Code
- *     @adap: the adapter
  *     @link_down_rc: Link Down Reason Code
  *
  *     Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
        return reason[link_down_rc];
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -9110,7 +9110,6 @@ found:
 /**
  *     t4_prep_adapter - prepare SW and HW for operation
  *     @adapter: the adapter
- *     @reset: if true perform a HW reset
  *
  *     Initialize adapter SW state for the various HW modules, set initial
  *     values for some adapter tunables, take PHYs out of reset, and
@@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
 /**
  *     t4_i2c_rd - read I2C data from adapter
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @port: Port number if per-port device; <0 if not
  *     @devid: per-port device ID or absolute device ID
  *     @offset: byte offset into device I2C space
@@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
 
 /**
  *      t4_set_vlan_acl - Set a VLAN id for the specified VF
- *      @adapter: the adapter
+ *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
  *      @vf: one of the VFs instantiated by the specified PF
  *      @vlan: The vlanid to be set
index cec865a..a7641be 100644
@@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi)
  *     @tcam_idx: TCAM index of existing filter for old value of MAC address,
  *                or -1
  *     @addr: the new MAC address value
- *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @persistent: whether a new MAC allocation should be persistent
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
index f71c973..8c3d6e1 100644
@@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc,
  *     restore_rx_bufs - put back a packet's RX buffers
  *     @gl: the packet gather list
  *     @fl: the SGE Free List
- *     @nfrags: how many fragments in @si
+ *     @frags: how many fragments in @si
  *
  *     Called when we find out that the current packet, @si, can't be
  *     processed right away for some reason.  This is a very rare event and
@@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
 
 /**
  *     sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- *     @data: the adapter
+ *     @t: Rx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE RX queues.
  *
@@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 
 /**
  *     sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- *     @data: the adapter
+ *     @t: Tx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE TX queues.
  *
@@ -2405,6 +2405,7 @@ err:
  *     t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
  *     @adapter: the adapter
  *     @txq: pointer to the new txq to be filled in
+ *     @dev: the network device
  *     @devq: the network TX queue associated with the new txq
  *     @iqid: the relative ingress queue ID to which events relating to
  *             the new txq should be directed
index 9d49ff2..a31b873 100644
@@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
        return cc_fec;
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  *     @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  *     @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  *             -1 no change
+ *     @sleep_ok: call is allowed to sleep
  *
  *     Sets Rx properties of a virtual interface.
  */
@@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 /**
  *     t4vf_handle_get_port_info - process a FW reply message
  *     @pi: the port info
- *     @rpl: start of the FW message
+ *     @cmd: start of the FW message
  *
  *     Processes a GET_PORT_INFO FW reply message.
  */
@@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
        return 0;
 }
 
-/**
- */
 int t4vf_prep_adapter(struct adapter *adapter)
 {
        int err;
index 298c557..96831f4 100644
@@ -1595,6 +1595,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en)
        return 0;
 }
 
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_rx_rings; i++)
+               enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_tx_rings; i++)
+               enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+}
+
 int enetc_set_features(struct net_device *ndev,
                       netdev_features_t features)
 {
@@ -1604,6 +1622,14 @@ int enetc_set_features(struct net_device *ndev,
        if (changed & NETIF_F_RXHASH)
                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
 
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+               enetc_enable_rxvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+               enetc_enable_txvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
        if (changed & NETIF_F_HW_TC)
                err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
 
index 6314051..ce0d321 100644
@@ -531,22 +531,22 @@ struct enetc_msg_cmd_header {
 
 /* Common H/W utility functions */
 
-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+       u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
 
        val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
-       enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+       enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
 }
 
-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+       u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
 
        val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
-       enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+       enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
 }
 
 static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
index 824d211..4fac57d 100644
@@ -649,14 +649,6 @@ static int enetc_pf_set_features(struct net_device *ndev,
        netdev_features_t changed = ndev->features ^ features;
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
-               enetc_enable_rxvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
-       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
-               enetc_enable_txvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
                struct enetc_pf *pf = enetc_si_priv(priv->si);
 
index c117074..23f278e 100644
@@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
        struct net_device *ndev = ring_data->napi.dev;
 
        skb->protocol = eth_type_trans(skb, ndev);
-       (void)napi_gro_receive(&ring_data->napi, skb);
+       napi_gro_receive(&ring_data->napi, skb);
 }
 
 static int hns_desc_unused(struct hnae_ring *ring)
index 96d36ae..c5c7326 100644
@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        }
 
        netdev->min_mtu = IBMVETH_MIN_MTU;
-       netdev->max_mtu = ETH_MAX_MTU;
+       netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
 
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
index 2baf7b3..0fd7eae 100644
@@ -1971,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        release_sub_crqs(adapter, 1);
                } else {
                        rc = ibmvnic_reset_crq(adapter);
-                       if (!rc)
+                       if (rc == H_CLOSED || rc == H_SUCCESS) {
                                rc = vio_enable_interrupts(adapter->vdev);
+                               if (rc)
+                                       netdev_err(adapter->netdev,
+                                                  "Reset failed to enable interrupts. rc=%d\n",
+                                                  rc);
+                       }
                }
 
                if (rc) {
                        netdev_err(adapter->netdev,
-                                  "Couldn't initialize crq. rc=%d\n", rc);
+                                  "Reset couldn't initialize crq. rc=%d\n", rc);
                        goto out;
                }
 
index aa8026b..67806b7 100644
@@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err)
                                goto rx_unwind;
+                       err = i40e_alloc_rx_bi(&rx_rings[i]);
+                       if (err)
+                               goto rx_unwind;
 
                        /* now allocate the Rx buffers to make sure the OS
                         * has enough memory, any failure here means abort
index 5d807c8..56ecd6c 100644
@@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                i40e_get_netdev_stats_struct_tx(ring, stats);
 
                if (i40e_enabled_xdp_vsi(vsi)) {
-                       ring++;
+                       ring = READ_ONCE(vsi->xdp_rings[i]);
+                       if (!ring)
+                               continue;
                        i40e_get_netdev_stats_struct_tx(ring, stats);
                }
 
-               ring++;
+               ring = READ_ONCE(vsi->rx_rings[i]);
+               if (!ring)
+                       continue;
                do {
                        start   = u64_stats_fetch_begin_irq(&ring->syncp);
                        packets = ring->stats.packets;
@@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = READ_ONCE(vsi->tx_rings[q]);
+               if (!p)
+                       continue;
 
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;
 
-               /* Rx queue is part of the same block as Tx queue */
-               p = &p[1];
+               /* locate Rx ring */
+               p = READ_ONCE(vsi->rx_rings[q]);
+               if (!p)
+                       continue;
+
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
@@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
        if (vsi->tx_rings && vsi->tx_rings[0]) {
                for (i = 0; i < vsi->alloc_queue_pairs; i++) {
                        kfree_rcu(vsi->tx_rings[i], rcu);
-                       vsi->tx_rings[i] = NULL;
-                       vsi->rx_rings[i] = NULL;
+                       WRITE_ONCE(vsi->tx_rings[i], NULL);
+                       WRITE_ONCE(vsi->rx_rings[i], NULL);
                        if (vsi->xdp_rings)
-                               vsi->xdp_rings[i] = NULL;
+                               WRITE_ONCE(vsi->xdp_rings[i], NULL);
                }
        }
 }
@@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                ring->itr_setting = pf->tx_itr_default;
-               vsi->tx_rings[i] = ring++;
+               WRITE_ONCE(vsi->tx_rings[i], ring++);
 
                if (!i40e_enabled_xdp_vsi(vsi))
                        goto setup_rx;
@@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                set_ring_xdp(ring);
                ring->itr_setting = pf->tx_itr_default;
-               vsi->xdp_rings[i] = ring++;
+               WRITE_ONCE(vsi->xdp_rings[i], ring++);
 
 setup_rx:
                ring->queue_index = i;
@@ -10892,7 +10901,7 @@ setup_rx:
                ring->size = 0;
                ring->dcb_tc = 0;
                ring->itr_setting = pf->rx_itr_default;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
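
The i40e hunks above (and the matching ice and ixgbe hunks that follow) are all one pattern: ring pointers that a lockless stats reader may dereference are published and retired with WRITE_ONCE(), and the reader pairs that with READ_ONCE() plus a NULL check so a ring that is mid-teardown is simply skipped. A minimal sketch of the pairing, with hypothetical types and names:

#include <linux/compiler.h>
#include <linux/types.h>

struct ring { u64 packets; };
struct vsi  { struct ring *rx_rings[64]; };

/* Reader side: snapshot each slot once and tolerate NULL. */
static u64 sum_rx_packets(struct vsi *vsi, int n)
{
        u64 total = 0;
        int i;

        for (i = 0; i < n; i++) {
                struct ring *r = READ_ONCE(vsi->rx_rings[i]);

                if (!r)                 /* slot is being freed; skip it */
                        continue;
                total += r->packets;
        }
        return total;
}

/* Writer side: publish with WRITE_ONCE(vsi->rx_rings[i], ring) and
 * retire with WRITE_ONCE(vsi->rx_rings[i], NULL) so neither store can
 * be torn or reordered by the compiler against the reader above. */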
index 28b46cc..2e3a39c 100644
@@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_txq; i++) {
                        if (vsi->tx_rings[i]) {
                                kfree_rcu(vsi->tx_rings[i], rcu);
-                               vsi->tx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->tx_rings[i], NULL);
                        }
                }
        }
@@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_rxq; i++) {
                        if (vsi->rx_rings[i]) {
                                kfree_rcu(vsi->rx_rings[i], rcu);
-                               vsi->rx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->rx_rings[i], NULL);
                        }
                }
        }
@@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->vsi = vsi;
                ring->dev = dev;
                ring->count = vsi->num_tx_desc;
-               vsi->tx_rings[i] = ring;
+               WRITE_ONCE(vsi->tx_rings[i], ring);
        }
 
        /* Allocate Rx rings */
@@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->netdev = vsi->netdev;
                ring->dev = dev;
                ring->count = vsi->num_rx_desc;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
index 082825e..4cbd49c 100644
@@ -1702,7 +1702,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                xdp_ring->netdev = NULL;
                xdp_ring->dev = dev;
                xdp_ring->count = vsi->num_tx_desc;
-               vsi->xdp_rings[i] = xdp_ring;
+               WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
                if (ice_setup_tx_ring(xdp_ring))
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
index fd9f5d4..2e35c57 100644
@@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = txr_idx;
 
                /* assign ring to adapter */
-               adapter->tx_ring[txr_idx] = ring;
+               WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
 
                /* update count and index */
                txr_count--;
@@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                set_ring_xdp(ring);
 
                /* assign ring to adapter */
-               adapter->xdp_ring[xdp_idx] = ring;
+               WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
 
                /* update count and index */
                xdp_count--;
@@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = rxr_idx;
 
                /* assign ring to adapter */
-               adapter->rx_ring[rxr_idx] = ring;
+               WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
 
                /* update count and index */
                rxr_count--;
@@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
                if (ring_is_xdp(ring))
-                       adapter->xdp_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
                else
-                       adapter->tx_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
        }
 
        ixgbe_for_each_ring(ring, q_vector->rx)
-               adapter->rx_ring[ring->queue_index] = NULL;
+               WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
 
        adapter->q_vector[v_idx] = NULL;
        napi_hash_del(&q_vector->napi);
index f162b8b..97a423e 100644
@@ -7051,7 +7051,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+               if (!rx_ring)
+                       continue;
                non_eop_descs += rx_ring->rx_stats.non_eop_descs;
                alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
                alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7072,15 +7075,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        packets = 0;
        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+               if (!tx_ring)
+                       continue;
                restart_queue += tx_ring->tx_stats.restart_queue;
                tx_busy += tx_ring->tx_stats.tx_busy;
                bytes += tx_ring->stats.bytes;
                packets += tx_ring->stats.packets;
        }
        for (i = 0; i < adapter->num_xdp_queues; i++) {
-               struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+               struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
 
+               if (!xdp_ring)
+                       continue;
                restart_queue += xdp_ring->tx_stats.restart_queue;
                tx_busy += xdp_ring->tx_stats.tx_busy;
                bytes += xdp_ring->stats.bytes;
index 946925b..c639e3a 100644
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+/* Only exists on Armada XP and Armada 370 */
 #define MVNETA_SERDES_CFG                       0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
 #define      MVNETA_QSGMII_SERDES_PROTO                 0x0667
+#define      MVNETA_HSGMII_SERDES_PROTO                 0x1107
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -3529,26 +3531,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
        return 0;
 }
 
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
 {
        int ret;
 
-       if (!pp->comphy)
-               return 0;
-
-       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
-                              pp->phy_interface);
+       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
        if (ret)
                return ret;
 
        return phy_power_on(pp->comphy);
 }
 
+static int mvneta_config_interface(struct mvneta_port *pp,
+                                  phy_interface_t interface)
+{
+       int ret = 0;
+
+       if (pp->comphy) {
+               if (interface == PHY_INTERFACE_MODE_SGMII ||
+                   interface == PHY_INTERFACE_MODE_1000BASEX ||
+                   interface == PHY_INTERFACE_MODE_2500BASEX) {
+                       ret = mvneta_comphy_init(pp, interface);
+               }
+       } else {
+               switch (interface) {
+               case PHY_INTERFACE_MODE_QSGMII:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_QSGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_SGMII:
+               case PHY_INTERFACE_MODE_1000BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_SGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_2500BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_HSGMII_SERDES_PROTO);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       pp->phy_interface = interface;
+
+       return ret;
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        int cpu;
 
-       WARN_ON(mvneta_comphy_init(pp));
+       WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
 
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3926,14 +3962,10 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
        if (state->speed == SPEED_2500)
                new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
 
-       if (pp->comphy && pp->phy_interface != state->interface &&
-           (state->interface == PHY_INTERFACE_MODE_SGMII ||
-            state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-            state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
-               pp->phy_interface = state->interface;
-
-               WARN_ON(phy_power_off(pp->comphy));
-               WARN_ON(mvneta_comphy_init(pp));
+       if (pp->phy_interface != state->interface) {
+               if (pp->comphy)
+                       WARN_ON(phy_power_off(pp->comphy));
+               WARN_ON(mvneta_config_interface(pp, state->interface));
        }
 
        if (new_ctrl0 != gmac_ctrl0)
@@ -4982,12 +5014,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-       if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-       else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
-                phy_interface_mode_is_8023z(phy_mode))
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-       else if (!phy_interface_mode_is_rgmii(phy_mode))
+       if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+           phy_mode != PHY_INTERFACE_MODE_SGMII &&
+           !phy_interface_mode_is_8023z(phy_mode) &&
+           !phy_interface_mode_is_rgmii(phy_mode))
                return -EINVAL;
 
        return 0;
@@ -5176,10 +5206,10 @@ static int mvneta_probe(struct platform_device *pdev)
        if (err < 0)
                goto err_netdev;
 
-       err = mvneta_port_power_up(pp, phy_mode);
+       err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               goto err_netdev;
+               return err;
        }
 
        /* Armada3700 network controller does not support per-cpu
index 8071312..eefeb1c 100644
@@ -407,7 +407,9 @@ static int
 mlx5e_rep_indr_setup_block(struct net_device *netdev,
                           struct mlx5e_rep_priv *rpriv,
                           struct flow_block_offload *f,
-                          flow_setup_cb_t *setup_cb)
+                          flow_setup_cb_t *setup_cb,
+                          void *data,
+                          void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5e_rep_indr_block_priv *indr_priv;
@@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-               block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
-                                              mlx5e_rep_indr_block_unbind);
+               block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+                                                   mlx5e_rep_indr_block_unbind,
+                                                   f, netdev, data, rpriv,
+                                                   cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
@@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -469,15 +473,19 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
 
 static
 int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_tc_cb);
+                                                 mlx5e_rep_indr_setup_tc_cb,
+                                                 data, cleanup);
        case TC_SETUP_FT:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_ft_cb);
+                                                 mlx5e_rep_indr_setup_ft_cb,
+                                                 data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
@@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
 {
        flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
-                                mlx5e_rep_indr_setup_tc_cb);
+                                mlx5e_rep_indr_block_unbind);
 }
 
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
index 55af877..029ea34 100644
@@ -978,10 +978,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 
                lossy = !(pfc || pause_en);
                thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells);
+               thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
                delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
                                                        pfc, pause_en);
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells);
+               delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
                total_cells = thres_cells + delay_cells;
 
                taken_headroom_cells += total_cells;
index 6e87457..3abe3e7 100644
@@ -374,17 +374,15 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
        return NULL;
 }
 
-static inline void
+static inline u32
 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
-                                u16 *p_size)
+                                u32 size_cells)
 {
        /* Ports with eight lanes use two headroom buffers between which the
         * configured headroom size is split. Therefore, multiply the calculated
         * headroom size by two.
         */
-       if (mlxsw_sp_port->mapping.width != 8)
-               return;
-       *p_size *= 2;
+       return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
 }
 
 enum mlxsw_sp_flood_type {
index f25a8b0..6f84557 100644
@@ -312,7 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size);
+               size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
index f843545..92351a7 100644
@@ -782,7 +782,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
                speed = 0;
 
        buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
-       mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize);
+       buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
        mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 }
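
All four mlxsw hunks above are one refactor: mlxsw_sp_port_headroom_8x_adjust() used to scale the headroom in place through a u16 *, which forced the span code to cast a u32 buffer size down with (u16 *)&buffsize and silently corrupt values that do not fit in 16 bits (and touch the wrong half on big-endian). Returning the scaled value as u32 removes the cast entirely. The shape of the fix, sketched with hypothetical names:

/* Before (buggy shape): in-place update through a narrower pointer.
 *
 *      void adjust(const struct port *p, u16 *p_size);
 *      adjust(port, (u16 *)&buffsize);         // buffsize is u32
 *
 * After: a pure function over the caller's real type. */
static u32 headroom_8x_adjust(u8 lane_width, u32 size_cells)
{
        /* Ports with eight lanes split headroom across two buffers,
         * so the configured size must be doubled. */
        return lane_width == 8 ? 2 * size_cells : size_cells;
}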
index 628fa9b..3731651 100644
@@ -297,7 +297,7 @@ struct vxge_hw_fifo_config {
  * @greedy_return: If Set it forces the device to return absolutely all RxD
  *             that are consumed and still on board when a timer interrupt
  *             triggers. If Clear, then if the device has already returned
- *             RxD before current timer interrupt trigerred and after the
+ *             RxD before current timer interrupt triggered and after the
  *             previous timer interrupt triggered, then the device is not
  *             forced to returned the rest of the consumed RxD that it has
  *             on board which account for a byte count less than the one
index c393276..bb448c8 100644
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
        flush_work(&app_priv->cmsg_work);
 
        flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
-                                nfp_flower_setup_indr_block_cb);
+                                nfp_flower_setup_indr_tc_release);
 
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_cleanup(app);
index 6c3dc3b..7f54a62 100644
@@ -459,9 +459,10 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                 struct tc_cls_matchall_offload *flow);
 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                               enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
-                                  void *cb_priv);
+                               enum tc_setup_type type, void *type_data,
+                               void *data,
+                               void (*cleanup)(struct flow_block_cb *block_cb));
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
 
 void
 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
index 695d24b..d7340dc 100644
@@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        return NULL;
 }
 
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
-                                  void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+                                         void *type_data, void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
        struct flow_cls_offload *flower = type_data;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
        }
 }
 
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
 
@@ -1647,7 +1647,8 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv)
 
 static int
 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
-                              struct flow_block_offload *f)
+                              struct flow_block_offload *f, void *data,
+                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
@@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-               block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              nfp_flower_setup_indr_tc_release);
+               block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   nfp_flower_setup_indr_tc_release,
+                                                   f, netdev, data, app, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -1710,7 +1712,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 
 int
 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!nfp_fl_is_netdev_to_offload(netdev))
                return -EOPNOTSUPP;
@@ -1718,7 +1722,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
-                                                     type_data);
+                                                     type_data, data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
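
The mlx5 hunks earlier and the nfp hunks above track the same flow-offload core change: an indirect TC block callback now receives the owner's data pointer plus a core-supplied cleanup hook, and must allocate its block_cb with flow_indr_block_cb_alloc() and tear it down with flow_indr_block_cb_remove(), so the core can release callbacks left dangling when the offloading device unregisters. A hedged sketch of the driver-side bind/unbind shape (names other than the flow_* calls are hypothetical, and the flow_block_cb_add() bookkeeping follows the usual driver pattern rather than these hunks):

/* Bind: the trailing (f, netdev, data, owner_priv, cleanup) arguments
 * let the core locate and release this block_cb on its own. */
block_cb = flow_indr_block_cb_alloc(my_setup_cb, cb_ident, cb_priv,
                                    my_release, f, netdev, data,
                                    owner_priv, cleanup);
if (IS_ERR(block_cb))
        return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);

/* Unbind: the indirect variant, not plain flow_block_cb_remove(). */
flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);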
index 32b9d77..55cef5b 100644
@@ -147,7 +147,7 @@ struct pch_gbe_regs {
 #define PCH_GBE_RH_ALM_FULL_8   0x00001000      /* 8 words */
 #define PCH_GBE_RH_ALM_FULL_16  0x00002000      /* 16 words */
 #define PCH_GBE_RH_ALM_FULL_32  0x00003000      /* 32 words */
-/* RX FIFO Read Triger Threshold */
+/* RX FIFO Read Trigger Threshold */
 #define PCH_GBE_RH_RD_TRG_4     0x00000000      /* 4 words */
 #define PCH_GBE_RH_RD_TRG_8     0x00000200      /* 8 words */
 #define PCH_GBE_RH_RD_TRG_16    0x00000400      /* 16 words */
index 9d8c969..aaa00ed 100644
@@ -96,7 +96,8 @@ static void ionic_link_status_check(struct ionic_lif *lif)
        u16 link_status;
        bool link_up;
 
-       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
+           test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
                return;
 
        link_status = le16_to_cpu(lif->info->status.link_status);
@@ -1245,6 +1246,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
 
        netdev->hw_features |= netdev->hw_enc_features;
        netdev->features |= netdev->hw_features;
+       netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
 
        netdev->priv_flags |= IFF_UNICAST_FLT |
                              IFF_LIVE_ADDR_CHANGE;
@@ -1692,15 +1694,15 @@ static void ionic_stop_queues(struct ionic_lif *lif)
        if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
                return;
 
-       ionic_txrx_disable(lif);
        netif_tx_disable(lif->netdev);
+       ionic_txrx_disable(lif);
 }
 
 int ionic_stop(struct net_device *netdev)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
 
-       if (!netif_device_present(netdev))
+       if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
                return 0;
 
        ionic_stop_queues(lif);
@@ -1983,18 +1985,19 @@ int ionic_reset_queues(struct ionic_lif *lif)
        bool running;
        int err = 0;
 
-       /* Put off the next watchdog timeout */
-       netif_trans_update(lif->netdev);
-
        err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
        if (err)
                return err;
 
        running = netif_running(lif->netdev);
-       if (running)
+       if (running) {
+               netif_device_detach(lif->netdev);
                err = ionic_stop(lif->netdev);
-       if (!err && running)
+       }
+       if (!err && running) {
                ionic_open(lif->netdev);
+               netif_device_attach(lif->netdev);
+       }
 
        clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
 
index 7b76667..08ba9d5 100644
@@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }
 
-       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->vf_cids = vf_cids;
        iids->tids += vf_tids * p_mngr->vf_count;
 
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
        return p_blk;
 }
 
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+       u32 cli_idx, blk_idx;
+
+       for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+               for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+                       clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+               for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+                       clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+       }
+}
+
 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 
        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
+       /* Reset all ILT blocks at the beginning of ILT computing in order
+        * to prevent memory allocation for irrelevant blocks afterwards.
+        */
+       qed_cxt_ilt_blk_reset(p_hwfn);
+
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
index 57a0dab..81e8fbe 100644
@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {
 
        /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
        "The filter/trigger constraint dword offsets are not enabled for recording",
-
+       /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+       "No matching framing mode",
 
        /* DBG_STATUS_VFC_READ_ERROR */
        "Error reading from VFC",
index 1eebf30..3aa5137 100644
@@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev,
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
        union qed_llh_filter filter = {};
-       u8 filter_idx, abs_ppfid;
+       u8 filter_idx, abs_ppfid = 0;
        u32 high, low, ref_cnt;
        int rc = 0;
 
@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
 
 void qed_resc_free(struct qed_dev *cdev)
 {
+       struct qed_rdma_info *rdma_info;
+       struct qed_hwfn *p_hwfn;
        int i;
 
        if (IS_VF(cdev)) {
@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
        qed_llh_free(cdev);
 
        for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               p_hwfn = cdev->hwfns + i;
+               rdma_info = p_hwfn->p_rdma_info;
 
                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
                        qed_ooo_free(p_hwfn);
                }
 
-               if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+                       qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
                        qed_rdma_info_free(p_hwfn);
+               }
 
                qed_iov_free(p_hwfn);
                qed_l2_free(p_hwfn);
index d2fe61a..5409a2d 100644
@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
        return qed_iwarp_ll2_stop(p_hwfn);
 }
 
index 4566815..7271dd7 100644
@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
                        break;
                }
        }
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
index 856051f..adc2c8f 100644
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
        mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
 }
 
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS       90
+#define QED_VF_CHANNEL_USLEEP_DELAY            100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS       10
+#define QED_VF_CHANNEL_MSLEEP_DELAY            25
+
 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 {
        union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
        struct ustorm_trigger_vf_zone trigger;
        struct ustorm_vf_zone *zone_data;
-       int rc = 0, time = 100;
+       int iter, rc = 0;
 
        zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
 
@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
        REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
 
        /* When PF would be done with the response, it would write back to the
-        * `done' address. Poll until then.
+        * `done' address from a coherent DMA zone. Poll until then.
         */
-       while ((!*done) && time) {
-               msleep(25);
-               time--;
+
+       iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+               dma_rmb();
+       }
+
+       iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+               dma_rmb();
        }
 
        if (!*done) {
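
The rewritten wait replaces a flat msleep() loop with two-phase polling: a burst of udelay() iterations catches the common case where the PF responds within a few hundred microseconds, and only then does it fall back to coarse msleep() polling; the dma_rmb() after each delay orders the re-read of the `done' byte that the PF writes via DMA. A standalone sketch with the same budgets (roughly 9 ms of busy-waiting, then up to 250 ms of sleeping):

#include <linux/delay.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define FAST_ITERS      90
#define FAST_DELAY_US   100
#define SLOW_ITERS      10
#define SLOW_DELAY_MS   25

static bool wait_for_pf_done(const volatile u8 *done)
{
        int iter = FAST_ITERS;

        while (!*done && iter--) {
                udelay(FAST_DELAY_US);
                dma_rmb();      /* order the re-read after the wait */
        }

        iter = SLOW_ITERS;
        while (!*done && iter--) {
                msleep(SLOW_DELAY_MS);
                dma_rmb();
        }

        return *done;
}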
index 756c05e..29e2854 100644
@@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        /* PTP not supported on VFs */
        if (!is_vf)
-               qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+               qede_ptp_enable(edev);
 
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
@@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        if (system_state == SYSTEM_POWER_OFF)
                return;
        qed_ops->common->remove(cdev);
+       edev->cdev = NULL;
 
        /* Since this can happen out-of-sync with other flows,
         * don't release the netdevice until after slowpath stop
index 4c7f7a7..cd5841a 100644
@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        if (ptp->tx_skb) {
                dev_kfree_skb_any(ptp->tx_skb);
                ptp->tx_skb = NULL;
+               clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
        }
 
        /* Disable PTP in HW */
@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        edev->ptp = NULL;
 }
 
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
        /* Init work queue for Tx timestamping */
        INIT_WORK(&ptp->work, qede_ptp_task);
 
-       /* Init cyclecounter and timecounter. This is done only in the first
-        * load. If done in every load, PTP application will fail when doing
-        * unload / load (e.g. MTU change) while it is running.
-        */
-       if (init_tc) {
-               memset(&ptp->cc, 0, sizeof(ptp->cc));
-               ptp->cc.read = qede_ptp_read_cc;
-               ptp->cc.mask = CYCLECOUNTER_MASK(64);
-               ptp->cc.shift = 0;
-               ptp->cc.mult = 1;
-
-               timecounter_init(&ptp->tc, &ptp->cc,
-                                ktime_to_ns(ktime_get_real()));
-       }
+       /* Init cyclecounter and timecounter */
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = qede_ptp_read_cc;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
 
-       return rc;
+       timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+       return 0;
 }
 
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        edev->ptp = ptp;
 
-       rc = qede_ptp_init(edev, init_tc);
+       rc = qede_ptp_init(edev);
        if (rc)
                goto err1;
 
index 691a14c..89c7f3c 100644
@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
 void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
 
 static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
index 2d873ae..668ccc9 100644
@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
 
        qede_rdma_cleanup_event(edev);
        destroy_workqueue(edev->rdma_info.rdma_wq);
+       edev->rdma_info.rdma_wq = NULL;
 }
 
 int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
        if (edev->rdma_info.exp_recovery)
                return;
 
-       if (!edev->rdma_info.qedr_dev)
+       if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
                return;
 
        /* We don't want the cleanup flow to start while we're allocating and
index dad84ec..b660ddb 100644
@@ -2114,8 +2114,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
 void r8169_apply_firmware(struct rtl8169_private *tp)
 {
        /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
-       if (tp->rtl_fw)
+       if (tp->rtl_fw) {
                rtl_fw_write_firmware(tp, tp->rtl_fw);
+               /* At least one firmware doesn't reset tp->ocp_base. */
+               tp->ocp_base = OCP_STD_PHY_BASE;
+       }
 }
 
 static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
index 328bc38..0f366cc 100644
@@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 next:
-               if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
-                   xdp_result) {
+               if (skb)
+                       napi_gro_receive(&priv->napi, skb);
+               if (skb || xdp_result) {
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += xdp.data_end - xdp.data;
                }
index 7526658..4661ef8 100644
@@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
        geneve->collect_md = metadata;
        geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
        geneve->ttl_inherit = ttl_inherit;
+       geneve->df = df;
        geneve_unquiesce(geneve, gs4, gs6);
 
        return 0;
index f257023..e351d65 100644
@@ -480,8 +480,7 @@ config MICROCHIP_T1_PHY
 config MICROSEMI_PHY
        tristate "Microsemi PHYs"
        depends on MACSEC || MACSEC=n
-       select CRYPTO_AES
-       select CRYPTO_ECB
+       select CRYPTO_LIB_AES if MACSEC
        help
          Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
 
index b4d3dc4..d53ca88 100644
@@ -10,7 +10,7 @@
 #include <linux/phy.h>
 #include <dt-bindings/net/mscc-phy-vsc8531.h>
 
-#include <crypto/skcipher.h>
+#include <crypto/aes.h>
 
 #include <net/macsec.h>
 
@@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
 static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
                                     u16 key_len, u8 hkey[16])
 {
-       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-       struct skcipher_request *req = NULL;
-       struct scatterlist src, dst;
-       DECLARE_CRYPTO_WAIT(wait);
-       u32 input[4] = {0};
+       const u8 input[AES_BLOCK_SIZE] = {0};
+       struct crypto_aes_ctx ctx;
        int ret;
 
-       if (IS_ERR(tfm))
-               return PTR_ERR(tfm);
-
-       req = skcipher_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                     CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
-                                     &wait);
-       ret = crypto_skcipher_setkey(tfm, key, key_len);
-       if (ret < 0)
-               goto out;
-
-       sg_init_one(&src, input, 16);
-       sg_init_one(&dst, hkey, 16);
-       skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
-       ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       ret = aes_expandkey(&ctx, key, key_len);
+       if (ret)
+               return ret;
 
-out:
-       skcipher_request_free(req);
-       crypto_free_skcipher(tfm);
-       return ret;
+       aes_encrypt(&ctx, hkey, input);
+       memzero_explicit(&ctx, sizeof(ctx));
+       return 0;
 }
 
 static int vsc8584_macsec_transformation(struct phy_device *phydev,
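
The new derive-key body computes the MACsec hash key as one AES-ECB encryption of the all-zero block under the traffic key, which is why CRYPTO_LIB_AES now suffices: for a single synchronous block operation the library calls aes_expandkey()/aes_encrypt() are far lighter than allocating and driving an skcipher transform. A self-contained sketch of the same computation:

#include <crypto/aes.h>
#include <linux/string.h>       /* memzero_explicit() */

/* hkey = AES-ECB_K(0^128), computed entirely on the stack. */
static int derive_hash_key(const u8 *key, unsigned int key_len,
                           u8 hkey[AES_BLOCK_SIZE])
{
        const u8 zero[AES_BLOCK_SIZE] = { 0 };
        struct crypto_aes_ctx ctx;
        int err;

        err = aes_expandkey(&ctx, key, key_len);
        if (err)
                return err;                     /* invalid key length */

        aes_encrypt(&ctx, hkey, zero);          /* args: ctx, out, in */
        memzero_explicit(&ctx, sizeof(ctx));    /* scrub round keys */
        return 0;
}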
index 1de3938..56cfae9 100644
@@ -840,7 +840,7 @@ static void phy_error(struct phy_device *phydev)
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-static int phy_disable_interrupts(struct phy_device *phydev)
+int phy_disable_interrupts(struct phy_device *phydev)
 {
        int err;
 
index 04946de..b4978c5 100644
@@ -794,8 +794,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 
        /* Grab the bits from PHYIR2, and put them in the lower half */
        phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-       if (phy_reg < 0)
-               return -EIO;
+       if (phy_reg < 0) {
+               /* returning -ENODEV doesn't stop bus scanning */
+               return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+       }
 
        *phy_id |= phy_reg;
 
@@ -1090,6 +1092,10 @@ int phy_init_hw(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
+       ret = phy_disable_interrupts(phydev);
+       if (ret)
+               return ret;
+
        if (phydev->drv->config_init)
                ret = phydev->drv->config_init(phydev);
 
index 0ab65fb..3b7c70e 100644
@@ -1463,6 +1463,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
                                   struct ethtool_pauseparam *pause)
 {
        struct phylink_link_state *config = &pl->link_config;
+       bool manual_changed;
+       int pause_state;
 
        ASSERT_RTNL();
 
@@ -1477,15 +1479,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
            !pause->autoneg && pause->rx_pause != pause->tx_pause)
                return -EINVAL;
 
-       mutex_lock(&pl->state_mutex);
-       config->pause = 0;
+       pause_state = 0;
        if (pause->autoneg)
-               config->pause |= MLO_PAUSE_AN;
+               pause_state |= MLO_PAUSE_AN;
        if (pause->rx_pause)
-               config->pause |= MLO_PAUSE_RX;
+               pause_state |= MLO_PAUSE_RX;
        if (pause->tx_pause)
-               config->pause |= MLO_PAUSE_TX;
+               pause_state |= MLO_PAUSE_TX;
 
+       mutex_lock(&pl->state_mutex);
        /*
         * See the comments for linkmode_set_pause(), wrt the deficiencies
         * with the current implementation.  A solution to this issue would
@@ -1502,18 +1504,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
        linkmode_set_pause(config->advertising, pause->tx_pause,
                           pause->rx_pause);
 
-       /* If we have a PHY, phylib will call our link state function if the
-        * mode has changed, which will trigger a resolve and update the MAC
-        * configuration.
+       manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
+                        (!(pause_state & MLO_PAUSE_AN) &&
+                          (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
+
+       config->pause = pause_state;
+
+       if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED,
+                                    &pl->phylink_disable_state))
+               phylink_pcs_config(pl, true, &pl->link_config);
+
+       mutex_unlock(&pl->state_mutex);
+
+       /* If we have a PHY, a change of the pause frame advertisement will
+        * cause phylib to renegotiate (if AN is enabled) which will in turn
+        * call our phylink_phy_change() and trigger a resolve.  Note that
+        * we can't hold our state mutex while calling phy_set_asym_pause().
         */
-       if (pl->phydev) {
+       if (pl->phydev)
                phy_set_asym_pause(pl->phydev, pause->rx_pause,
                                   pause->tx_pause);
-       } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
-                            &pl->phylink_disable_state)) {
-               phylink_pcs_config(pl, true, &pl->link_config);
+
+       /* If the manual pause settings changed, make sure we trigger a
+        * resolve to update their state; we cannot guarantee that the
+        * link will cycle.
+        */
+       if (manual_changed) {
+               pl->mac_link_dropped = true;
+               phylink_run_resolve(pl);
        }
-       mutex_unlock(&pl->state_mutex);
 
        return 0;
 }
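
The densest part of the rework is the manual_changed test: a resolve must
be forced when autoneg participation toggles, or when autoneg is off and
the manual tx/rx pause settings differ. A runnable model of just that
predicate (the MLO_PAUSE_* bits are redefined locally for illustration
and do not match the kernel's actual values):

    #include <stdbool.h>
    #include <stdio.h>

    #define MLO_PAUSE_TX        0x1
    #define MLO_PAUSE_RX        0x2
    #define MLO_PAUSE_TXRX_MASK (MLO_PAUSE_TX | MLO_PAUSE_RX)
    #define MLO_PAUSE_AN        0x4

    static bool manual_changed(int old_state, int new_state)
    {
            return ((old_state ^ new_state) & MLO_PAUSE_AN) ||
                   (!(new_state & MLO_PAUSE_AN) &&
                    ((old_state ^ new_state) & MLO_PAUSE_TXRX_MASK));
    }

    int main(void)
    {
            /* autoneg switched off: resolve needed, prints 1 */
            printf("%d\n", manual_changed(MLO_PAUSE_AN, MLO_PAUSE_TX));
            /* tx changed while autoneg stays on: prints 0 */
            printf("%d\n", manual_changed(MLO_PAUSE_AN,
                                          MLO_PAUSE_AN | MLO_PAUSE_TX));
            return 0;
    }
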
index 93da7d3..74568ae 100644 (file)
@@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Wait max 640 ms to detect energy */
-               phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
-                                     rc & MII_LAN83C185_ENERGYON, 10000,
-                                     640000, true);
+               /* Wait max 640 ms to detect energy; a timeout is not
+                * an actual error.
+                */
+               read_poll_timeout(phy_read, rc,
+                                 rc & MII_LAN83C185_ENERGYON || rc < 0,
+                                 10000, 640000, true, phydev,
+                                 MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
                        return rc;
 
index 9507114..a38e868 100644 (file)
@@ -1491,10 +1491,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                }
 
                if (pkt_cnt == 0) {
-                       /* Skip IP alignment psudo header */
-                       skb_pull(skb, 2);
                        skb->len = pkt_len;
-                       skb_set_tail_pointer(skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(skb, 2);
+                       skb_set_tail_pointer(skb, skb->len);
                        skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(skb, pkt_hdr);
                        return 1;
@@ -1503,8 +1503,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                ax_skb = skb_clone(skb, GFP_ATOMIC);
                if (ax_skb) {
                        ax_skb->len = pkt_len;
-                       ax_skb->data = skb->data + 2;
-                       skb_set_tail_pointer(ax_skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(ax_skb, 2);
+                       skb_set_tail_pointer(ax_skb, ax_skb->len);
                        ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(ax_skb, pkt_hdr);
                        usbnet_skb_return(dev, ax_skb);
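
Both hunks replace open-coded pointer arithmetic with skb_pull(), which
advances the data pointer and shrinks len as a single operation, so the
following skb_set_tail_pointer(skb, skb->len) is consistent by
construction. A toy model of that invariant (toy struct, not the real
sk_buff):

    #include <stdio.h>

    struct toy_skb {
            unsigned char *data;
            unsigned int len;
    };

    static void toy_pull(struct toy_skb *skb, unsigned int n)
    {
            skb->data += n;     /* advance the payload start ... */
            skb->len -= n;      /* ... and shrink the length together */
    }

    int main(void)
    {
            unsigned char buf[64];
            struct toy_skb skb = { .data = buf, .len = 60 };

            toy_pull(&skb, 2);  /* strip the 2-byte alignment pad */
            printf("len=%u offset=%td\n", skb.len, skb.data - buf);
            return 0;           /* prints len=58 offset=2 */
    }
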
index 355be77..3cf4dc3 100644 (file)
@@ -1324,7 +1324,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 
        if (pdata) {
-               cancel_delayed_work(&pdata->carrier_check);
+               cancel_delayed_work_sync(&pdata->carrier_check);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
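
Plain cancel_delayed_work() only removes a pending item; a handler that is
already running on another CPU can still touch pdata after the kfree().
The _sync variant waits for such a handler to finish, which is what makes
the free safe. The general shape of the pattern, sketched for a
hypothetical driver-private struct:

    /* teardown of a delayed work item embedded in 'priv' (sketch) */
    cancel_delayed_work_sync(&priv->work); /* waits for a running handler */
    kfree(priv);                           /* safe: no handler can remain */
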
index e8085ab..89d85dc 100644 (file)
@@ -1380,6 +1380,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                        struct vxlan_rdst *rd;
 
                        if (rcu_access_pointer(f->nh)) {
+                               if (*idx < cb->args[2])
+                                       goto skip_nh;
                                err = vxlan_fdb_info(skb, vxlan, f,
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
@@ -1387,6 +1389,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     NLM_F_MULTI, NULL);
                                if (err < 0)
                                        goto out;
+skip_nh:
+                               *idx += 1;
                                continue;
                        }
 
index 3ac3f85..a8f151b 100644 (file)
@@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev)
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
 
+       mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
-               return ret;
-       mutex_lock(&wg->device_update_lock);
+               goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
+out:
        mutex_unlock(&wg->device_update_lock);
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev)
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
+       rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
@@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev)
        skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
        free_percpu(wg->incoming_handshakes_worker);
-       if (wg->have_creating_net_ref)
-               put_net(wg->creating_net);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
 
-       pr_debug("%s: Interface deleted\n", dev->name);
+       pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
 }
 
@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;
 
-       wg->creating_net = src_net;
+       rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __read_mostly = {
        .newlink                = wg_newlink,
 };
 
-static int wg_netdevice_notification(struct notifier_block *nb,
-                                    unsigned long action, void *data)
+static void wg_netns_pre_exit(struct net *net)
 {
-       struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
-       struct wg_device *wg = netdev_priv(dev);
-
-       ASSERT_RTNL();
-
-       if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
-               return 0;
+       struct wg_device *wg;
 
-       if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
-               put_net(wg->creating_net);
-               wg->have_creating_net_ref = false;
-       } else if (dev_net(dev) != wg->creating_net &&
-                  !wg->have_creating_net_ref) {
-               wg->have_creating_net_ref = true;
-               get_net(wg->creating_net);
+       rtnl_lock();
+       list_for_each_entry(wg, &device_list, device_list) {
+               if (rcu_access_pointer(wg->creating_net) == net) {
+                       pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
+                       netif_carrier_off(wg->dev);
+                       mutex_lock(&wg->device_update_lock);
+                       rcu_assign_pointer(wg->creating_net, NULL);
+                       wg_socket_reinit(wg, NULL, NULL);
+                       mutex_unlock(&wg->device_update_lock);
+               }
        }
-       return 0;
+       rtnl_unlock();
 }
 
-static struct notifier_block netdevice_notifier = {
-       .notifier_call = wg_netdevice_notification
+static struct pernet_operations pernet_ops = {
+       .pre_exit = wg_netns_pre_exit
 };
 
 int __init wg_device_init(void)
@@ -429,18 +425,18 @@ int __init wg_device_init(void)
                return ret;
 #endif
 
-       ret = register_netdevice_notifier(&netdevice_notifier);
+       ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_pm;
 
        ret = rtnl_link_register(&link_ops);
        if (ret)
-               goto error_netdevice;
+               goto error_pernet;
 
        return 0;
 
-error_netdevice:
-       unregister_netdevice_notifier(&netdevice_notifier);
+error_pernet:
+       unregister_pernet_device(&pernet_ops);
 error_pm:
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
@@ -451,7 +447,7 @@ error_pm:
 void wg_device_uninit(void)
 {
        rtnl_link_unregister(&link_ops);
-       unregister_netdevice_notifier(&netdevice_notifier);
+       unregister_pernet_device(&pernet_ops);
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
 #endif
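
Replacing the netdevice notifier with pernet_operations means the cleanup
runs exactly once per dying namespace, from .pre_exit, before that
namespace's devices are unregistered. A minimal kernel-side sketch of the
registration (hypothetical names, module boilerplate omitted):

    static void my_pre_exit(struct net *net)
    {
            /* drop any references this driver holds on 'net' */
    }

    static struct pernet_operations my_pernet_ops = {
            .pre_exit = my_pre_exit,
    };

    /* at init: */ ret = register_pernet_device(&my_pernet_ops);
    /* at exit: */ unregister_pernet_device(&my_pernet_ops);
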
index b15a8be..4d0144e 100644 (file)
@@ -40,7 +40,7 @@ struct wg_device {
        struct net_device *dev;
        struct crypt_queue encrypt_queue, decrypt_queue;
        struct sock __rcu *sock4, *sock6;
-       struct net *creating_net;
+       struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
        struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
        struct workqueue_struct *packet_crypt_wq;
@@ -56,7 +56,6 @@ struct wg_device {
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
-       bool have_creating_net_ref;
 };
 
 int wg_device_init(void);
index 802099c..20a4f3c 100644 (file)
@@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
        if (flags & ~__WGDEVICE_F_ALL)
                goto out;
 
-       ret = -EPERM;
-       if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
-            info->attrs[WGDEVICE_A_FWMARK]) &&
-           !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
-               goto out;
+       if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
+               struct net *net;
+               rcu_read_lock();
+               net = rcu_dereference(wg->creating_net);
+               ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
+               rcu_read_unlock();
+               if (ret)
+                       goto out;
+       }
 
        ++wg->device_update_gen;
 
index 6264336..201a226 100644 (file)
@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
        memcpy(handshake->hash, hash, NOISE_HASH_LEN);
        memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
        handshake->remote_index = src->sender_index;
-       if ((s64)(handshake->last_initiation_consumption -
-           (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+       initiation_consumption = ktime_get_coarse_boottime_ns();
+       if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
                handshake->last_initiation_consumption = initiation_consumption;
        handshake->state = HANDSHAKE_CONSUMED_INITIATION;
        up_write(&handshake->lock);
index 9143814..9b2ab6f 100644 (file)
@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
        if (unlikely(routed_peer != peer))
                goto dishonest_packet_peer;
 
-       if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
-               ++dev->stats.rx_dropped;
-               net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
-                                   dev->name, peer->internal_id,
-                                   &peer->endpoint.addr);
-       } else {
-               update_rx_stats(peer, message_data_len(len_before_trim));
-       }
+       napi_gro_receive(&peer->napi, skb);
+       update_rx_stats(peer, message_data_len(len_before_trim));
        return;
 
 dishonest_packet_peer:
index f901802..c33e2c8 100644 (file)
@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock)
 
 int wg_socket_init(struct wg_device *wg, u16 port)
 {
+       struct net *net;
        int ret;
        struct udp_tunnel_sock_cfg cfg = {
                .sk_user_data = wg,
@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port)
        };
 #endif
 
+       rcu_read_lock();
+       net = rcu_dereference(wg->creating_net);
+       net = net ? maybe_get_net(net) : NULL;
+       rcu_read_unlock();
+       if (unlikely(!net))
+               return -ENONET;
+
 #if IS_ENABLED(CONFIG_IPV6)
 retry:
 #endif
 
-       ret = udp_sock_create(wg->creating_net, &port4, &new4);
+       ret = udp_sock_create(net, &port4, &new4);
        if (ret < 0) {
                pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
-               return ret;
+               goto out;
        }
        set_sock_opts(new4);
-       setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+       setup_udp_tunnel_sock(net, new4, &cfg);
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6_mod_enabled()) {
                port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
-               ret = udp_sock_create(wg->creating_net, &port6, &new6);
+               ret = udp_sock_create(net, &port6, &new6);
                if (ret < 0) {
                        udp_tunnel_sock_release(new4);
                        if (ret == -EADDRINUSE && !port && retries++ < 100)
                                goto retry;
                        pr_err("%s: Could not create IPv6 socket\n",
                               wg->dev->name);
-                       return ret;
+                       goto out;
                }
                set_sock_opts(new6);
-               setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+               setup_udp_tunnel_sock(net, new6, &cfg);
        }
 #endif
 
        wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
-       return 0;
+       ret = 0;
+out:
+       put_net(net);
+       return ret;
 }
 
 void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
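
The hop from RCU pointer to full reference is the key idiom here:
rcu_dereference() is only stable inside the read-side section, and
maybe_get_net(), unlike get_net(), fails gracefully once the namespace
refcount has already hit zero. Reduced to its general shape, assuming a
hypothetical object holding an RCU-managed struct net pointer:

    struct net *net;

    rcu_read_lock();
    net = rcu_dereference(obj->net);        /* may already be NULL */
    net = net ? maybe_get_net(net) : NULL;  /* refuses a dying netns */
    rcu_read_unlock();
    if (!net)
            return -ENONET;
    /* ... use 'net' outside the RCU section ... */
    put_net(net);
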
index bc8c15f..080e5aa 100644 (file)
@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
 void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                  struct wil_net_stats *stats, bool gro)
 {
-       gro_result_t rc = GRO_NORMAL;
        struct wil6210_vif *vif = ndev_to_vif(ndev);
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct wireless_dev *wdev = vif_to_wdev(vif);
@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
         */
        int mcast = is_multicast_ether_addr(da);
        struct sk_buff *xmit_skb = NULL;
-       static const char * const gro_res_str[] = {
-               [GRO_MERGED]            = "GRO_MERGED",
-               [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
-               [GRO_HELD]              = "GRO_HELD",
-               [GRO_NORMAL]            = "GRO_NORMAL",
-               [GRO_DROP]              = "GRO_DROP",
-               [GRO_CONSUMED]          = "GRO_CONSUMED",
-       };
 
        if (wdev->iftype == NL80211_IFTYPE_STATION) {
                sa = wil_skb_get_sa(skb);
                if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
                        /* mcast packet looped back to us */
-                       rc = GRO_DROP;
                        dev_kfree_skb(skb);
-                       goto stats;
+                       ndev->stats.rx_dropped++;
+                       stats->rx_dropped++;
+                       wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+                       return;
                }
        } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
                if (mcast) {
@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                        wil_rx_handle_eapol(vif, skb);
 
                if (gro)
-                       rc = napi_gro_receive(&wil->napi_rx, skb);
+                       napi_gro_receive(&wil->napi_rx, skb);
                else
                        netif_rx_ni(skb);
-               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
-                            len, gro_res_str[rc]);
-       }
-stats:
-       /* statistics. rc set to GRO_NORMAL for AP bridging */
-       if (unlikely(rc == GRO_DROP)) {
-               ndev->stats.rx_dropped++;
-               stats->rx_dropped++;
-               wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
-       } else {
-               ndev->stats.rx_packets++;
-               stats->rx_packets++;
-               ndev->stats.rx_bytes += len;
-               stats->rx_bytes += len;
-               if (mcast)
-                       ndev->stats.multicast++;
        }
+       ndev->stats.rx_packets++;
+       stats->rx_packets++;
+       ndev->stats.rx_bytes += len;
+       stats->rx_bytes += len;
+       if (mcast)
+               ndev->stats.multicast++;
 }
 
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
index a04afe7..ef6f818 100644 (file)
@@ -314,10 +314,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                 child, addr);
 
                        if (of_mdiobus_child_is_phy(child)) {
+                               /* -ENODEV is the return code that PHYLIB has
+                                * standardized on to indicate that bus
+                                * scanning should continue.
+                                */
                                rc = of_mdiobus_register_phy(mdio, child, addr);
-                               if (rc && rc != -ENODEV)
+                               if (!rc)
+                                       break;
+                               if (rc != -ENODEV)
                                        goto unregister;
-                               break;
                        }
                }
        }
index 8f677f5..edb1c4f 100644 (file)
@@ -684,7 +684,7 @@ config REGULATOR_MT6323
 
 config REGULATOR_MT6358
        tristate "MediaTek MT6358 PMIC"
-       depends on MFD_MT6397 && BROKEN
+       depends on MFD_MT6397
        help
          Say y here to select this option to enable the power regulator of
          MediaTek MT6358 PMIC.
index e1d6c8f..fe65b5a 100644 (file)
@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
        },
        {
                DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
-               .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
        },
        {
                DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
index e970e9d..e4bb09b 100644 (file)
@@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
                        continue;
                }
 
-               ret = selector + sel;
+               ret = selector + sel - range->min_sel;
 
                voltage = rdev->desc->ops->list_voltage(rdev, ret);
 
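
The one-line fix corrects an off-by-min_sel error: 'sel' is an absolute
hardware selector that already starts at range->min_sel, while the
returned value must be a logical index counted across all pickable
ranges. Worked example numbers make the bug visible:

    #include <stdio.h>

    int main(void)
    {
            int selector = 10;  /* selectors used by earlier ranges */
            int min_sel = 4;    /* first hw selector of this range */
            int sel = 6;        /* matching hw selector in this range */

            printf("buggy=%d\n", selector + sel);           /* 16 */
            printf("fixed=%d\n", selector + sel - min_sel); /* 12 */
            return 0;
    }
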
index 6895379..4c8e8b4 100644 (file)
@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
 
 };
 
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
 #define PFUZE100_FIXED_REG(_chip, _name, base, voltage)        \
        [_chip ## _ ## _name] = {       \
                .desc = {       \
@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
        .stby_mask = 0x20,      \
 }
 
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step)  {       \
-       .desc = {       \
-               .name = #_name,\
-               .n_voltages = ((max) - (min)) / (step) + 1,     \
-               .ops = &pfuze100_sw_regulator_ops,      \
-               .type = REGULATOR_VOLTAGE,      \
-               .id = _chip ## _ ## _name,      \
-               .owner = THIS_MODULE,   \
-               .min_uV = (min),        \
-               .uV_step = (step),      \
-               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
-               .vsel_mask = 0x7,       \
-       },      \
-       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
-       .stby_mask = 0x7,       \
-}
+/* No linear case for some of the PFUZE3000 switches */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages)   \
+       [_chip ## _ ##  _name] = {      \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = ARRAY_SIZE(voltages),     \
+                       .ops = &pfuze3000_sw_regulator_ops,     \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = _chip ## _ ## _name,      \
+                       .owner = THIS_MODULE,   \
+                       .volt_table = voltages, \
+                       .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+                       .vsel_mask = (mask),    \
+                       .enable_reg = (base) + PFUZE100_MODE_OFFSET,    \
+                       .enable_mask = 0xf,     \
+                       .enable_val = 0x8,      \
+                       .enable_time = 500,     \
+               },      \
+               .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+               .stby_mask = (mask),    \
+               .sw_reg = true,         \
+       }
 
 #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)  {       \
        .desc = {       \
@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3000_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
        PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
-       PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
        PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3001_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
-       PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
        PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
index 18a0fb7..88e998d 100644 (file)
@@ -4544,9 +4544,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
        int fallback = *(int *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "setaccb");
-       if (cmd->hdr.return_code)
-               return -EIO;
-       qeth_setadpparms_inspect_rc(cmd);
 
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        QETH_CARD_TEXT_(card, 2, "rc=%d",
@@ -4556,7 +4553,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
                                 access_ctrl_req->subcmd_code, CARD_DEVID(card),
                                 cmd->data.setadapterparms.hdr.return_code);
-       switch (cmd->data.setadapterparms.hdr.return_code) {
+       switch (qeth_setadpparms_inspect_rc(cmd)) {
        case SET_ACCESS_CTRL_RC_SUCCESS:
                if (card->options.isolation == ISOLATION_MODE_NONE) {
                        dev_info(&card->gdev->dev,
@@ -6840,9 +6837,11 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features)
 {
+       struct qeth_card *card = dev->ml_priv;
+
        /* Traffic with local next-hop is not eligible for some offloads: */
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               struct qeth_card *card = dev->ml_priv;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           card->options.isolation != ISOLATION_MODE_FWD) {
                netdev_features_t restricted = 0;
 
                if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
index a35face..58190c9 100644 (file)
@@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
                return;
 
        if (dma->chan_tx) {
-               dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
-                                dma_bufsize, DMA_TO_DEVICE);
+               dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+                                 dma->tx_dma_buf, dma->tx_dma_phys);
                dma_release_channel(dma->chan_tx);
        }
 
        if (dma->chan_rx) {
-               dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
-                                dma_bufsize, DMA_FROM_DEVICE);
+               dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+                                 dma->rx_dma_buf, dma->rx_dma_phys);
                dma_release_channel(dma->chan_rx);
        }
 }
index 06192c9..cbc2387 100644 (file)
 
 struct rspi_data {
        void __iomem *addr;
-       u32 max_speed_hz;
+       u32 speed_hz;
        struct spi_controller *ctlr;
        struct platform_device *pdev;
        wait_queue_head_t wait;
@@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
-                           2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 
        clksrc = clk_get_rate(rspi->clk);
        while (div < 3) {
-               if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+               if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
                        break;
                div++;
                clksrc /= 2;
        }
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
        rspi->spcmd |= div << 2;
 
@@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set byte access */
@@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
 {
        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
        struct spi_device *spi = msg->spi;
+       const struct spi_transfer *xfer;
        int ret;
 
-       rspi->max_speed_hz = spi->max_speed_hz;
+       /*
+        * As the Bit Rate Register must not be changed while the device is
+        * active, all transfers in a message must use the same bit rate.
+        * In theory, the sequencer could be enabled, and each Command Register
+        * could divide the base bit rate by a different value.
+        * However, most RSPI variants do not have Transfer Data Length
+        * Multiplier Setting Registers, so each sequence step would be limited
+        * to a single word, making this feature unsuitable for large
+        * transfers, which would gain most from it.
+        */
+       rspi->speed_hz = spi->max_speed_hz;
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (xfer->speed_hz < rspi->speed_hz)
+                       rspi->speed_hz = xfer->speed_hz;
+       }
 
        rspi->spcmd = SPCMD_SSLKP;
        if (spi->mode & SPI_CPOL)
index 88e6543..bd23c46 100644 (file)
@@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
+       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
 
        /* Start the watchdog to reset system */
        sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
index 3c44bb2..a900962 100644 (file)
@@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
        .exec_op = stm32_qspi_exec_op,
 };
 
-static void stm32_qspi_release(struct stm32_qspi *qspi)
-{
-       pm_runtime_get_sync(qspi->dev);
-       /* disable qspi */
-       writel_relaxed(0, qspi->io_base + QSPI_CR);
-       stm32_qspi_dma_free(qspi);
-       mutex_destroy(&qspi->lock);
-       pm_runtime_put_noidle(qspi->dev);
-       pm_runtime_disable(qspi->dev);
-       pm_runtime_set_suspended(qspi->dev);
-       pm_runtime_dont_use_autosuspend(qspi->dev);
-       clk_disable_unprepare(qspi->clk);
-}
-
 static int stm32_qspi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        if (IS_ERR(rstc)) {
                ret = PTR_ERR(rstc);
                if (ret == -EPROBE_DEFER)
-                       goto err_qspi_release;
+                       goto err_clk_disable;
        } else {
                reset_control_assert(rstc);
                udelay(2);
@@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, qspi);
        ret = stm32_qspi_dma_setup(qspi);
        if (ret)
-               goto err_qspi_release;
+               goto err_dma_free;
 
        mutex_init(&qspi->lock);
 
@@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 
        ret = devm_spi_register_master(dev, ctrl);
        if (ret)
-               goto err_qspi_release;
+               goto err_pm_runtime_free;
 
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
        return 0;
 
-err_qspi_release:
-       stm32_qspi_release(qspi);
+err_pm_runtime_free:
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+       stm32_qspi_dma_free(qspi);
+err_clk_disable:
+       clk_disable_unprepare(qspi->clk);
 err_master_put:
        spi_master_put(qspi->ctrl);
 
@@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev)
 {
        struct stm32_qspi *qspi = platform_get_drvdata(pdev);
 
-       stm32_qspi_release(qspi);
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       stm32_qspi_dma_free(qspi);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+       clk_disable_unprepare(qspi->clk);
 
        return 0;
 }
index d753df7..59e0767 100644 (file)
@@ -609,15 +609,20 @@ err_find_dev:
 static int spidev_release(struct inode *inode, struct file *filp)
 {
        struct spidev_data      *spidev;
+       int                     dofree;
 
        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;
 
+       spin_lock_irq(&spidev->spi_lock);
+       /* ... after we unbound from the underlying device? */
+       dofree = (spidev->spi == NULL);
+       spin_unlock_irq(&spidev->spi_lock);
+
        /* last close? */
        spidev->users--;
        if (!spidev->users) {
-               int             dofree;
 
                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;
@@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;
 
-               spin_lock_irq(&spidev->spi_lock);
-               if (spidev->spi)
-                       spidev->speed_hz = spidev->spi->max_speed_hz;
-
-               /* ... after we unbound from the underlying device? */
-               dofree = (spidev->spi == NULL);
-               spin_unlock_irq(&spidev->spi_lock);
-
                if (dofree)
                        kfree(spidev);
+               else
+                       spidev->speed_hz = spidev->spi->max_speed_hz;
        }
 #ifdef CONFIG_SPI_SLAVE
-       spi_slave_abort(spidev->spi);
+       if (!dofree)
+               spi_slave_abort(spidev->spi);
 #endif
        mutex_unlock(&device_list_lock);
 
@@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi)
 {
        struct spidev_data      *spidev = spi_get_drvdata(spi);
 
+       /* prevent new opens */
+       mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);
 
-       /* prevent new opens */
-       mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
index ff6562f..de211ef 100644 (file)
@@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d)
  * @config: the bus operations that is supported by this device
  * @size: size of the parent structure that contains private data
  *
- * Drvier should use vdap_alloc_device() wrapper macro instead of
+ * Driver should use vdpa_alloc_device() wrapper macro instead of
  * using this directly.
  *
  * Returns an error when parent/config/dma_dev is not set or fail to get
index 0466921..a09dedc 100644 (file)
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
        return 0;
 }
 
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+       static void *backend;
+
+       const bool enable = fd != -1;
+       struct vhost_virtqueue *vq;
+       int r;
+
+       mutex_lock(&n->dev.mutex);
+       r = vhost_dev_check_owner(&n->dev);
+       if (r)
+               goto err;
+
+       if (index >= VHOST_TEST_VQ_MAX) {
+               r = -ENOBUFS;
+               goto err;
+       }
+       vq = &n->vqs[index];
+       mutex_lock(&vq->mutex);
+
+       /* Verify that ring has been setup correctly. */
+       if (!vhost_vq_access_ok(vq)) {
+               r = -EFAULT;
+               goto err_vq;
+       }
+       if (!enable) {
+               vhost_poll_stop(&vq->poll);
+               backend = vhost_vq_get_backend(vq);
+               vhost_vq_set_backend(vq, NULL);
+       } else {
+               vhost_vq_set_backend(vq, backend);
+               r = vhost_vq_init_access(vq);
+               if (r == 0)
+                       r = vhost_poll_start(&vq->poll, vq->kick);
+       }
+
+       mutex_unlock(&vq->mutex);
+
+       if (enable) {
+               vhost_test_flush_vq(n, index);
+       }
+
+       mutex_unlock(&n->dev.mutex);
+       return 0;
+
+err_vq:
+       mutex_unlock(&vq->mutex);
+err:
+       mutex_unlock(&n->dev.mutex);
+       return r;
+}
+
 static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
 {
+       struct vhost_vring_file backend;
        struct vhost_test *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                if (copy_from_user(&test, argp, sizeof test))
                        return -EFAULT;
                return vhost_test_run(n, test);
+       case VHOST_TEST_SET_BACKEND:
+               if (copy_from_user(&backend, argp, sizeof backend))
+                       return -EFAULT;
+               return vhost_test_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
index 7dd265b..822bc4b 100644 (file)
@@ -4,5 +4,6 @@
 
 /* Start a given test on the virtio null device. 0 stops all tests. */
 #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
 
 #endif
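
Note that the handler in the vhost/test.c hunk above copies a struct
vhost_vring_file even though the _IOW() size argument says int. A
userspace usage sketch, assuming a /dev/vhost-test node and an already
configured ring (the ioctl value mirrors the header above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>    /* VHOST_VIRTIO, struct vhost_vring_file */

    #define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)

    int main(void)
    {
            struct vhost_vring_file backend = { .index = 0, .fd = -1 };
            int fd = open("/dev/vhost-test", O_RDWR);

            if (fd < 0)
                    return 1;
            /* fd == -1 detaches the backend and stops polling vq 0 */
            if (ioctl(fd, VHOST_TEST_SET_BACKEND, &backend) < 0)
                    perror("VHOST_TEST_SET_BACKEND");
            return 0;
    }
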
index 7580e34..a54b60d 100644 (file)
@@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
-       int index = vma->vm_pgoff;
+       unsigned long index = vma->vm_pgoff;
 
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
index 9d28a8e..e2a490c 100644 (file)
@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
                ops->graphics = 1;
 
                if (!blank) {
-                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+                               FB_ACTIVATE_KD_TEXT;
                        fb_set_var(info, &var);
                        ops->graphics = 0;
                        ops->var = info->var;
index bee29aa..def14ac 100644 (file)
@@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options)
                else if (!strcmp(this_opt, "noedid"))
                        noedid = true;
                else if (!strcmp(this_opt, "noblank"))
-                       blank = true;
+                       blank = false;
                else if (!strncmp(this_opt, "vtotal:", 7))
                        vram_total = simple_strtoul(this_opt + 7, NULL, 0);
                else if (!strncmp(this_opt, "vremap:", 7))
index 50c689f..f26f5f6 100644 (file)
@@ -101,6 +101,11 @@ struct virtio_mem {
 
        /* The parent resource for all memory added via this device. */
        struct resource *parent_resource;
+       /*
+        * Copy of "System RAM (virtio_mem)" to be used for
+        * add_memory_driver_managed().
+        */
+       const char *resource_name;
 
        /* Summary of all memory block states. */
        unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
@@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);
 
+       /*
+        * If the driver is force-unloaded while memory is still added to
+        * Linux, the resource name has to stay.
+        */
+       if (!vm->resource_name) {
+               vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
+                                                 GFP_KERNEL);
+               if (!vm->resource_name)
+                       return -ENOMEM;
+       }
+
        dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
-       return add_memory(nid, addr, memory_block_size_bytes());
+       return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
+                                        vm->resource_name);
 }
 
 /*
@@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
                                                VIRTIO_MEM_MB_STATE_OFFLINE);
        }
 
-       return rc;
+       return 0;
 }
 
 /*
@@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
-           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
                dev_warn(&vdev->dev, "device still has system memory added\n");
-       else
+       } else {
                virtio_mem_delete_resource(vm);
+               kfree_const(vm->resource_name);
+       }
 
        /* remove all tracking data - no locking needed */
        vfree(vm->mb_state);
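
kstrdup_const() pairs with kfree_const(): when the source string lives in
the kernel's .rodata the allocation can be elided and the original pointer
returned, and kfree_const() frees only if a real copy was made. Keeping
the resource name in such a copy is what lets it outlive a force-unloaded
driver, per the comment above. The pair in isolation (sketch):

    const char *name = kstrdup_const("System RAM (virtio_mem)", GFP_KERNEL);

    if (!name)
            return -ENOMEM;
    /* ... hand 'name' to add_memory_driver_managed() ... */
    kfree_const(name);  /* frees only if a copy was actually made */
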
index 176e8a2..c037ef5 100644 (file)
@@ -940,7 +940,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto out_put_group;
+               goto out;
        }
 
        /*
@@ -978,7 +978,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
-                       goto out_put_group;
+                       goto out;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
@@ -1001,13 +1001,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
-               goto out_put_group;
+               goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
-                       goto out_put_group;
+                       goto out;
                btrfs_release_path(path);
        }
 
@@ -1016,6 +1016,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                 &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);
 
+       /* Once for the block groups rbtree */
+       btrfs_put_block_group(block_group);
+
        if (fs_info->first_logical_byte == block_group->start)
                fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&fs_info->block_group_cache_lock);
@@ -1089,6 +1092,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        spin_unlock(&block_group->space_info->lock);
 
+       /*
+        * Remove the free space for the block group from the free space tree
+        * and the block group's item from the extent tree before marking the
+        * block group as removed. This prevents a race between a task that
+        * freezes and unfreezes the block group, this task, and another task
+        * allocating a new block group: the unfreeze task could remove the
+        * block group's extent map before this function deletes the block
+        * group item from the extent tree, allowing another task to create a
+        * block group with the same item key (which then fails with -EEXIST
+        * and aborts the transaction).
+        */
+       ret = remove_block_group_free_space(trans, block_group);
+       if (ret)
+               goto out;
+
+       ret = remove_block_group_item(trans, path, block_group);
+       if (ret < 0)
+               goto out;
+
        mutex_lock(&fs_info->chunk_mutex);
        spin_lock(&block_group->lock);
        block_group->removed = 1;
@@ -1123,17 +1145,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        mutex_unlock(&fs_info->chunk_mutex);
 
-       ret = remove_block_group_free_space(trans, block_group);
-       if (ret)
-               goto out_put_group;
-
-       /* Once for the block groups rbtree */
-       btrfs_put_block_group(block_group);
-
-       ret = remove_block_group_item(trans, path, block_group);
-       if (ret < 0)
-               goto out;
-
        if (remove_em) {
                struct extent_map_tree *em_tree;
 
@@ -1145,10 +1156,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                free_extent_map(em);
        }
 
-out_put_group:
+out:
        /* Once for the lookup reference */
        btrfs_put_block_group(block_group);
-out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
        btrfs_free_path(path);
index 30ce703..d404cce 100644 (file)
@@ -1009,6 +1009,8 @@ enum {
        BTRFS_ROOT_DEAD_RELOC_TREE,
        /* Mark dead root stored on device whose cleanup needs to be resumed */
        BTRFS_ROOT_DEAD_TREE,
+       /* The root has a log tree. Used only for subvolume roots. */
+       BTRFS_ROOT_HAS_LOG_TREE,
 };
 
 /*
index 2c14312..2520605 100644 (file)
@@ -1533,7 +1533,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 }
 
 static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
-                                   size_t *write_bytes)
+                                   size_t *write_bytes, bool nowait)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_root *root = inode->root;
@@ -1541,27 +1541,43 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
        u64 num_bytes;
        int ret;
 
-       if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
+       if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
                return -EAGAIN;
 
        lockstart = round_down(pos, fs_info->sectorsize);
        lockend = round_up(pos + *write_bytes,
                           fs_info->sectorsize) - 1;
+       num_bytes = lockend - lockstart + 1;
 
-       btrfs_lock_and_flush_ordered_range(inode, lockstart,
-                                          lockend, NULL);
+       if (nowait) {
+               struct btrfs_ordered_extent *ordered;
+
+               if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
+                       return -EAGAIN;
+
+               ordered = btrfs_lookup_ordered_range(inode, lockstart,
+                                                    num_bytes);
+               if (ordered) {
+                       btrfs_put_ordered_extent(ordered);
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
+       } else {
+               btrfs_lock_and_flush_ordered_range(inode, lockstart,
+                                                  lockend, NULL);
+       }
 
-       num_bytes = lockend - lockstart + 1;
        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
                        NULL, NULL, NULL);
        if (ret <= 0) {
                ret = 0;
-               btrfs_drew_write_unlock(&root->snapshot_lock);
+               if (!nowait)
+                       btrfs_drew_write_unlock(&root->snapshot_lock);
        } else {
                *write_bytes = min_t(size_t, *write_bytes ,
                                     num_bytes - pos + lockstart);
        }
-
+out_unlock:
        unlock_extent(&inode->io_tree, lockstart, lockend);
 
        return ret;
@@ -1633,7 +1649,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                                      BTRFS_INODE_PREALLOC)) &&
                            check_can_nocow(BTRFS_I(inode), pos,
-                                       &write_bytes) > 0) {
+                                           &write_bytes, false) > 0) {
                                /*
                                 * For nodata cow case, no need to reserve
                                 * data space.
@@ -1904,13 +1920,25 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        if (iocb->ki_flags & IOCB_NOWAIT) {
+               size_t nocow_bytes = count;
+
                /*
                 * We will allocate space in case nodatacow is not set,
                 * so bail
                 */
                if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                              BTRFS_INODE_PREALLOC)) ||
-                   check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+                   check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
+                                   true) <= 0) {
+                       inode_unlock(inode);
+                       return -EAGAIN;
+               }
+               /*
+                * There are holes in the range or parts of the range that must
+                * be COWed (shared extents, RO block groups, etc), so just bail
+                * out.
+                */
+               if (nocow_bytes < count) {
                        inode_unlock(inode);
                        return -EAGAIN;
                }
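
These -EAGAIN returns are what backs IOCB_NOWAIT: the filesystem refuses
to block on extent locks, ordered extents, or a COW fallback and asks the
caller to retry from a context that may sleep. From userspace the flag
surfaces via pwritev2() with RWF_NOWAIT; a sketch with a placeholder path:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    int main(void)
    {
            char buf[4096];
            struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
            int fd = open("/mnt/btrfs/file", O_WRONLY);

            if (fd < 0)
                    return 1;
            memset(buf, 0, sizeof(buf));
            if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 &&
                errno == EAGAIN)
                    puts("would block; retry without RWF_NOWAIT");
            return 0;
    }
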
index d04c82c..18d384f 100644 (file)
@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
        u64 num_bytes;
        unsigned long ram_size;
        u64 cur_alloc_size = 0;
+       u64 min_alloc_size;
        u64 blocksize = fs_info->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
        btrfs_drop_extent_cache(BTRFS_I(inode), start,
                        start + num_bytes - 1, 0);
 
+       /*
+        * Relocation relies on the relocated extents to have exactly the same
+        * size as the original extents. Normally writeback for relocation data
+        * extents follows a NOCOW path because relocation preallocates the
+        * extents. However, due to an operation such as scrub turning a block
+        * group to RO mode, it may fall back to COW mode, so we must make sure
+        * an extent allocated during COW has exactly the requested size and can
+        * not be split into smaller extents, otherwise relocation breaks and
+        * fails during the stage where it updates the bytenr of file extent
+        * items.
+        */
+       if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+               min_alloc_size = num_bytes;
+       else
+               min_alloc_size = fs_info->sectorsize;
+
        while (num_bytes > 0) {
                cur_alloc_size = num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-                                          fs_info->sectorsize, 0, alloc_hint,
+                                          min_alloc_size, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
                           int *page_started, unsigned long *nr_written)
 {
        const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+       const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+                                  BTRFS_DATA_RELOC_TREE_OBJECTID);
        const u64 range_bytes = end + 1 - start;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 range_start = start;
@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
         *    data space info, which we incremented in the step above.
         *
         * If we need to fallback to cow and the inode corresponds to a free
-        * space cache inode, we must also increment bytes_may_use of the data
-        * space_info for the same reason. Space caches always get a prealloc
+        * space cache inode or an inode of the data relocation tree, we must
+        * also increment bytes_may_use of the data space_info for the same
+        * reason. Space caches and relocated data extents always get a prealloc
         * extent for them, however scrub or balance may have set the block
-        * group that contains that extent to RO mode.
+        * group that contains that extent to RO mode and therefore force COW
+        * when starting writeback.
         */
        count = count_range_bits(io_tree, &range_start, end, range_bytes,
                                 EXTENT_NORESERVE, 0);
-       if (count > 0 || is_space_ino) {
-               const u64 bytes = is_space_ino ? range_bytes : count;
+       if (count > 0 || is_space_ino || is_reloc_ino) {
+               u64 bytes = count;
                struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
                struct btrfs_space_info *sinfo = fs_info->data_sinfo;
 
+               if (is_space_ino || is_reloc_ino)
+                       bytes = range_bytes;
+
                spin_lock(&sinfo->lock);
                btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
                spin_unlock(&sinfo->lock);
@@ -7865,9 +7889,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                        dio_data.overwrite = 1;
                        inode_unlock(inode);
                        relock = true;
-               } else if (iocb->ki_flags & IOCB_NOWAIT) {
-                       ret = -EAGAIN;
-                       goto out;
                }
                ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                                                   offset, count);
index 168deb8..e8f7c5f 100644 (file)
@@ -2692,7 +2692,7 @@ out:
        btrfs_put_root(root);
 out_free:
        btrfs_free_path(path);
-       kzfree(subvol_info);
+       kfree(subvol_info);
        return ret;
 }
 
index 920cee3..cd5348f 100644 (file)
@@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
+               set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
                clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
                root->log_start_pid = current->pid;
        }
@@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root)
 {
        int ret = -ENOENT;
 
+       if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
+               return ret;
+
        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
@@ -3303,6 +3307,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
        if (root->log_root) {
                free_log_tree(trans, root->log_root);
                root->log_root = NULL;
+               clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
        }
        return 0;
 }
index 7824f55..9b66c28 100644 (file)
@@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
 static inline void z_erofs_onlinepage_fixup(struct page *page,
        uintptr_t index, bool down)
 {
-       unsigned long *p, o, v, id;
-repeat:
-       p = &page_private(page);
-       o = READ_ONCE(*p);
+       union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+       int orig, orig_index, val;
 
-       id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-       if (id) {
+repeat:
+       orig = atomic_read(u.o);
+       orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+       if (orig_index) {
                if (!index)
                        return;
 
-               DBG_BUGON(id != index);
+               DBG_BUGON(orig_index != index);
        }
 
-       v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
-               ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
-       if (cmpxchg(p, o, v) != o)
+       val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+               ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+       if (atomic_cmpxchg(u.o, orig, val) != orig)
                goto repeat;
 }
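
z_erofs_onlinepage_fixup() packs a page index and a reference count into one word and updates both atomically, retrying whenever atomic_cmpxchg() observes a concurrent change. A self-contained sketch of that pack-and-cmpxchg pattern (the shift and mask are illustrative, not erofs's exact layout):

    #include <stdatomic.h>

    #define INDEX_SHIFT 16
    #define COUNT_MASK  0xffffu

    /* Claim the index field (if unset) and bump the count field in one
     * atomic step, retrying on contention. */
    static void fixup(atomic_uint *word, unsigned int index, unsigned int down)
    {
        unsigned int orig = atomic_load(word);

        for (;;) {
            if ((orig >> INDEX_SHIFT) && !index)
                return;             /* index already claimed, nothing to set */

            unsigned int val = (index << INDEX_SHIFT) |
                               ((orig & COUNT_MASK) + down);
            /* On failure, orig is refreshed with the current value. */
            if (atomic_compare_exchange_weak(word, &orig, val))
                return;
        }
    }
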
 
index 656647f..6560350 100644 (file)
@@ -230,7 +230,7 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
                d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(mnt);
        d_instantiate(path.dentry, inode);
-       file = alloc_file(&path, flags, fops);
+       file = alloc_file(&path, flags | FMODE_NONOTIFY, fops);
        if (IS_ERR(file)) {
                ihold(inode);
                path_put(&path);
index 152a0fc..751bc4d 100644 (file)
@@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
                                   &ocfs2_nfs_sync_lops, osb);
 }
 
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       init_rwsem(&osb->nfs_sync_rwlock);
+}
+
 void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
 {
        struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
@@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;
 
+       if (ex)
+               down_write(&osb->nfs_sync_rwlock);
+       else
+               down_read(&osb->nfs_sync_rwlock);
+
        if (ocfs2_mount_local(osb))
                return 0;
 
@@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, lockres,
                                     ex ? LKM_EXMODE : LKM_PRMODE);
+       if (ex)
+               up_write(&osb->nfs_sync_rwlock);
+       else
+               up_read(&osb->nfs_sync_rwlock);
 }
 
 int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
@@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
 local:
        ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
        ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
-       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       ocfs2_nfs_sync_lock_init(osb);
        ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
        osb->cconn = conn;
index ee5d985..2dd71d6 100644 (file)
@@ -395,6 +395,7 @@ struct ocfs2_super
        struct ocfs2_lock_res osb_super_lockres;
        struct ocfs2_lock_res osb_rename_lockres;
        struct ocfs2_lock_res osb_nfs_sync_lockres;
+       struct rw_semaphore nfs_sync_rwlock;
        struct ocfs2_lock_res osb_trim_fs_lockres;
        struct mutex obs_trim_fs_mutex;
        struct ocfs2_dlm_debug *osb_dlm_debug;
index 0dd8c41..19137c6 100644 (file)
 #define OCFS2_MAX_SLOTS                        255
 
 /* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT             -1
+#define OCFS2_INVALID_SLOT             ((u16)-1)
 
 #define OCFS2_VOL_UUID_LEN             16
 #define OCFS2_MAX_VOL_LABEL_LEN                64
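
The cast fixes an integer-promotion trap: a u16 slot holding 0xffff is promoted to int (65535) in a comparison, while a bare -1 stays -1, so `slot == OCFS2_INVALID_SLOT` could never be true with the old definition. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_SLOT_BAD   -1             /* old definition */
    #define INVALID_SLOT_GOOD  ((uint16_t)-1) /* new definition: 0xffff */

    int main(void)
    {
        uint16_t slot = 0xffff;   /* an empty slot as stored on disk */

        /* slot promotes to int (65535), -1 stays -1: never equal. */
        printf("slot == -1      : %d\n", slot == INVALID_SLOT_BAD);   /* 0 */

        /* both sides are 65535 after promotion: compares as expected. */
        printf("slot == (u16)-1 : %d\n", slot == INVALID_SLOT_GOOD);  /* 1 */
        return 0;
    }

With the (u16) cast both sides promote to 65535 and invalid slots compare as intended; the ocfs2_test_inode_bit() hunk further down relies on exactly this comparison.
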
@@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
 enum {
        BAD_BLOCK_SYSTEM_INODE = 0,
        GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
        SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
        HEARTBEAT_SYSTEM_INODE,
        GLOBAL_BITMAP_SYSTEM_INODE,
        USER_QUOTA_SYSTEM_INODE,
index 4836bec..45745cc 100644 (file)
@@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
                goto bail;
        }
 
-       inode_alloc_inode =
-               ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
-                                           suballoc_slot);
+       if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+       else
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
        if (!inode_alloc_inode) {
                /* the error code could be inaccurate, but we are not able to
                 * get the correct one. */
index 907fa5d..4a674db 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef _ASM_GENERIC_CACHEFLUSH_H
 #define _ASM_GENERIC_CACHEFLUSH_H
 
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
index c230b4e..a3a568b 100644 (file)
@@ -48,6 +48,9 @@ struct host1x_client_ops {
  * @channel: host1x channel associated with this client
  * @syncpts: array of syncpoints requested for this client
  * @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex serializing concurrent access to this structure
  */
 struct host1x_client {
        struct list_head list;
index 116bd9b..ca1887d 100644 (file)
@@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
@@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_init2init_qp_in_bits {
@@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
index c4c37fd..f6f8849 100644 (file)
@@ -257,8 +257,8 @@ struct lruvec {
         */
        unsigned long                   anon_cost;
        unsigned long                   file_cost;
-       /* Evictions & activations on the inactive file list */
-       atomic_long_t                   inactive_age;
+       /* Non-resident age, driven by LRU movement */
+       atomic_long_t                   nonresident_age;
        /* Refaults at the time of last reclaim cycle */
        unsigned long                   refaults;
        /* Various lruvec state flags (enum lruvec_flags) */
index 6fc613e..39e28e1 100644 (file)
@@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void)
        return this_cpu_read(softnet_data.xmit.recursion);
 }
 
-#define XMIT_RECURSION_LIMIT   10
+#define XMIT_RECURSION_LIMIT   8
 static inline bool dev_xmit_recursion(void)
 {
        return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
index b394bd4..c4676d6 100644 (file)
 int ipt_register_table(struct net *net, const struct xt_table *table,
                       const struct ipt_replace *repl,
                       const struct nf_hook_ops *ops, struct xt_table **res);
+
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                      const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops);
 
index 8225f78..1547d5f 100644 (file)
@@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
                        const struct nf_hook_ops *ops, struct xt_table **res);
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops);
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
                                  const struct nf_hook_state *state,
                                  struct xt_table *table);
index 8c05d0f..b693b60 100644 (file)
@@ -1416,6 +1416,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
 void phy_request_interrupt(struct phy_device *phydev);
 void phy_free_interrupt(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
index 733fad7..6d15040 100644 (file)
@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
 
 static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u32 prod = p_chain->u.chain16.prod_idx;
+       u32 cons = p_chain->u.chain16.cons_idx;
        u16 used;
 
-       used = (u16) (((u32)0x10000 +
-                      (u32)p_chain->u.chain16.prod_idx) -
-                     (u32)p_chain->u.chain16.cons_idx);
+       if (prod < cons)
+               prod += (u32)U16_MAX + 1;
+
+       used = (u16)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+               used -= prod / elem_per_page - cons / elem_per_page;
 
        return (u16)(p_chain->capacity - used);
 }
 
 static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u64 prod = p_chain->u.chain32.prod_idx;
+       u64 cons = p_chain->u.chain32.cons_idx;
        u32 used;
 
-       used = (u32) (((u64)0x100000000ULL +
-                      (u64)p_chain->u.chain32.prod_idx) -
-                     (u64)p_chain->u.chain32.cons_idx);
+       if (prod < cons)
+               prod += (u64)U32_MAX + 1;
+
+       used = (u32)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+               used -= (u32)(prod / elem_per_page - cons / elem_per_page);
 
        return p_chain->capacity - used;
 }
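
Both helpers measure how many elements sit between free-running producer and consumer indices; the rewrite widens the indices and adds U16_MAX + 1 (or U32_MAX + 1) once the producer has wrapped, so both the subtraction and the later per-page division see monotonic values. A worked 16-bit example:

    #include <stdint.h>
    #include <stdio.h>

    /* Distance between free-running 16-bit indices, valid across wraparound. */
    static uint16_t chain_used(uint16_t prod_idx, uint16_t cons_idx)
    {
        uint32_t prod = prod_idx, cons = cons_idx;

        if (prod < cons)                  /* producer wrapped past 0xffff */
            prod += (uint32_t)UINT16_MAX + 1;

        return (uint16_t)(prod - cons);
    }

    int main(void)
    {
        printf("%u\n", (unsigned)chain_used(100, 90));   /* 10, no wrap   */
        printf("%u\n", (unsigned)chain_used(5, 65530));  /* 11: 65536+5-65530 */
        return 0;
    }

The widening is what keeps the QED_CHAIN_MODE_NEXT_PTR branch honest: the prod/elem_per_page and cons/elem_per_page quotients must be taken on un-wrapped values, or their difference is wrong right after the index crosses 0xffff.
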
index 4c5974b..5b3216b 100644 (file)
@@ -313,6 +313,7 @@ struct vma_swap_readahead {
 };
 
 /* linux/mm/workingset.c */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
 void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
index 48bb681..0221f85 100644 (file)
@@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
index f2c8311..6315324 100644 (file)
@@ -450,6 +450,7 @@ struct flow_block_indr {
        struct net_device               *dev;
        enum flow_block_binder_type     binder_type;
        void                            *data;
+       void                            *cb_priv;
        void                            (*cleanup)(struct flow_block_cb *block_cb);
 };
 
@@ -467,6 +468,13 @@ struct flow_block_cb {
 struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
 
 struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
@@ -488,6 +496,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
        list_move(&block_cb->list, &offload->cb_list);
 }
 
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+                                            struct flow_block_offload *offload)
+{
+       list_del(&block_cb->indr.list);
+       list_move(&block_cb->list, &offload->cb_list);
+}
+
 bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);
 
@@ -532,11 +547,13 @@ static inline void flow_block_init(struct flow_block *flow_block)
 }
 
 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
-                                     enum tc_setup_type type, void *type_data);
+                                     enum tc_setup_type type, void *type_data,
+                                     void *data,
+                                     void (*cleanup)(struct flow_block_cb *block_cb));
 
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb);
+                             void (*release)(void *cb_priv));
 int flow_indr_dev_setup_offload(struct net_device *dev,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
index 3a6595b..e42402f 100644 (file)
@@ -21,7 +21,7 @@
  * |                                                               |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
  * For a control message, proto/ctype is interpreted as a type of
  * control message. For data messages, proto/ctype is the IP protocol
  * of the next header.
index 15b4d9a..122d9e2 100644 (file)
@@ -353,11 +353,13 @@ enum {
         ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions.  */
-#define SCTP_ADDR6_ALLOWED     0x00000001      /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED     0x00000001      /* IPv4 address is allowed by
                                                   local sock family */
-#define SCTP_ADDR4_PEERSUPP    0x00000002      /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED     0x00000002      /* IPv6 address is allowed by
+                                                  local sock family */
+#define SCTP_ADDR4_PEERSUPP    0x00000004      /* IPv4 address is supported by
                                                   peer */
-#define SCTP_ADDR6_PEERSUPP    0x00000004      /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP    0x00000008      /* IPv6 address is supported by
                                                   peer */
 
 /* Reasons to retransmit. */
index c53cc42..3428619 100644 (file)
@@ -1848,7 +1848,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
-       sk_tx_queue_clear(sk);
        sk->sk_socket = sock;
 }
 
index 094fe68..c7d213c 100644 (file)
@@ -1008,6 +1008,7 @@ struct xfrm_offload {
 #define        XFRM_GRO                32
 #define        XFRM_ESP_NO_TRAILER     64
 #define        XFRM_DEV_RESUME         128
+#define        XFRM_XMIT               256
 
        __u32                   status;
 #define CRYPTO_SUCCESS                         1
index b652206..8c5e381 100644 (file)
@@ -161,4 +161,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
 
 #define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
 
+struct dmaengine_pcm {
+       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+       const struct snd_dmaengine_pcm_config *config;
+       struct snd_soc_component component;
+       unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+       return container_of(p, struct dmaengine_pcm, component);
+}
 #endif
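
soc_component_to_pcm() is a standard container_of() accessor: given a pointer to the embedded snd_soc_component, it recovers the enclosing dmaengine_pcm. A self-contained illustration of the idiom with toy types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct component { const char *name; };

    struct pcm {
        int flags;
        struct component component;   /* embedded member */
    };

    int main(void)
    {
        struct pcm pcm = { .flags = 1, .component = { .name = "dma" } };
        struct component *c = &pcm.component;

        /* Recover the enclosing struct pcm from the embedded component. */
        struct pcm *p = container_of(c, struct pcm, component);
        printf("flags=%d name=%s\n", p->flags, p->component.name);
        return 0;
    }

The kernel macro additionally type-checks the member pointer; the pointer arithmetic is the same.
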
index ef5dd28..2756f9b 100644 (file)
@@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev,
                         const struct snd_soc_component_driver *component_driver,
                         struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+                                                           const char *driver_name);
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name);
 
@@ -1361,6 +1363,10 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
 struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
                                         struct snd_soc_dai_driver *dai_drv,
                                         bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming);
 void snd_soc_unregister_dai(struct snd_soc_dai *dai);
 
 struct snd_soc_dai *snd_soc_find_dai(
index ba9efdc..059b6e4 100644 (file)
@@ -400,7 +400,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_cong_begin_retransmission,     " Retrans") \
        EM(rxrpc_cong_cleared_nacks,            " Cleared") \
        EM(rxrpc_cong_new_low_nack,             " NewLowN") \
-       EM(rxrpc_cong_no_change,                "") \
+       EM(rxrpc_cong_no_change,                " -") \
        EM(rxrpc_cong_progress,                 " Progres") \
        EM(rxrpc_cong_retransmit_again,         " ReTxAgn") \
        EM(rxrpc_cong_rtt_window_end,           " RttWinE") \
index 1968481..974a713 100644 (file)
@@ -3168,7 +3168,7 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
  *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
index b6aac7e..4c14e8b 100644 (file)
@@ -205,6 +205,7 @@ struct fb_bitfield {
 #define FB_ACTIVATE_ALL               64       /* change all VCs on this fb    */
 #define FB_ACTIVATE_FORCE     128      /* force apply even when no change*/
 #define FB_ACTIVATE_INV_MODE  256       /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT   512       /* for KDSET vt ioctl */
 
 #define FB_ACCELF_TEXT         1       /* (OBSOLETE) see fb_info.flags and vc_mode */
 
index 84f15f4..bee3665 100644 (file)
@@ -36,7 +36,6 @@ enum br_mrp_port_state_type {
 enum br_mrp_port_role_type {
        BR_MRP_PORT_ROLE_PRIMARY,
        BR_MRP_PORT_ROLE_SECONDARY,
-       BR_MRP_PORT_ROLE_NONE,
 };
 
 enum br_mrp_tlv_header_type {
index cba368e..c21edb9 100644 (file)
 
 /* supported values for SO_RDS_TRANSPORT */
 #define        RDS_TRANS_IB    0
-#define        RDS_TRANS_IWARP 1
+#define        RDS_TRANS_GAP   1
 #define        RDS_TRANS_TCP   2
 #define RDS_TRANS_COUNT        3
 #define        RDS_TRANS_NONE  (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
 
 /* IOCTLS commands for SOL_RDS */
 #define SIOCRDSSETTOS          (SIOCPROTOPRIVATE)
index ee0f246..d56427c 100644 (file)
 #define SPI_TX_QUAD            0x200
 #define SPI_RX_DUAL            0x400
 #define SPI_RX_QUAD            0x800
+#define SPI_CS_WORD            0x1000
+#define SPI_TX_OCTAL           0x2000
+#define SPI_RX_OCTAL           0x4000
+#define SPI_3WIRE_HIZ          0x8000
 
 /*---------------------------------------------------------------------------*/
 
index 4d76f16..ac53102 100644 (file)
@@ -1276,16 +1276,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
 
 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 {
-       if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
+       if (unlikely(max_optlen < 0))
                return -EINVAL;
 
+       if (unlikely(max_optlen > PAGE_SIZE)) {
+               /* We don't expose optvals that are greater than PAGE_SIZE
+                * to the BPF program.
+                */
+               max_optlen = PAGE_SIZE;
+       }
+
        ctx->optval = kzalloc(max_optlen, GFP_USER);
        if (!ctx->optval)
                return -ENOMEM;
 
        ctx->optval_end = ctx->optval + max_optlen;
 
-       return 0;
+       return max_optlen;
 }
 
 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
@@ -1319,13 +1326,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
         */
        max_optlen = max_t(int, 16, *optlen);
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
 
        ctx.optlen = *optlen;
 
-       if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
+       if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
                ret = -EFAULT;
                goto out;
        }
@@ -1353,8 +1360,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                /* export any potential modifications */
                *level = ctx.level;
                *optname = ctx.optname;
-               *optlen = ctx.optlen;
-               *kernel_optval = ctx.optval;
+
+               /* optlen == 0 from BPF indicates that we should
+                * use original userspace data.
+                */
+               if (ctx.optlen != 0) {
+                       *optlen = ctx.optlen;
+                       *kernel_optval = ctx.optval;
+               }
        }
 
 out:
@@ -1385,12 +1398,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
            __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
                return retval;
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
-
        ctx.optlen = max_optlen;
 
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
+
        if (!retval) {
                /* If kernel getsockopt finished successfully,
                 * copy whatever was returned to the user back
@@ -1404,10 +1417,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                        goto out;
                }
 
-               if (ctx.optlen > max_optlen)
-                       ctx.optlen = max_optlen;
-
-               if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
+               if (copy_from_user(ctx.optval, optval,
+                                  min(ctx.optlen, max_optlen)) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
@@ -1436,10 +1447,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                goto out;
        }
 
-       if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
-           put_user(ctx.optlen, optlen)) {
-               ret = -EFAULT;
-               goto out;
+       if (ctx.optlen != 0) {
+               if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+                   put_user(ctx.optlen, optlen)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        ret = ctx.retval;
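
Both hooks now treat ctx.optlen == 0 as "keep the original data": setsockopt falls back to the user's buffer, and getsockopt skips the copy-out entirely. A sketch of a BPF program that opts out this way, assuming the usual libbpf conventions (section name, helper headers); the program is illustrative, not taken from the kernel tree:

    /* Illustrative cgroup getsockopt program: observe the call but pass
     * the kernel's result through unchanged by reporting optlen == 0. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/getsockopt")
    int passthrough_getsockopt(struct bpf_sockopt *ctx)
    {
        if (ctx->level == 1 /* SOL_SOCKET */) {
            ctx->optlen = 0;   /* 0 tells the kernel: use the original data */
            return 1;          /* allow */
        }
        return 1;
    }

    char _license[] SEC("license") = "GPL";
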
index 0cbb72c..5fdbc77 100644 (file)
@@ -86,12 +86,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
-static struct hlist_head *dev_map_create_hash(unsigned int entries)
+static struct hlist_head *dev_map_create_hash(unsigned int entries,
+                                             int numa_node)
 {
        int i;
        struct hlist_head *hash;
 
-       hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+       hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);
@@ -145,7 +146,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
                return -EINVAL;
 
        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
-               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+                                                          dtab->map.numa_node);
                if (!dtab->dev_index_head)
                        goto free_charge;
 
@@ -232,7 +234,7 @@ static void dev_map_free(struct bpf_map *map)
                        }
                }
 
-               kfree(dtab->dev_index_head);
+               bpf_map_area_free(dtab->dev_index_head);
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;
index bb05fd5..09cc78d 100644 (file)
@@ -181,34 +181,19 @@ void kimage_file_post_load_cleanup(struct kimage *image)
 static int
 kimage_validate_signature(struct kimage *image)
 {
-       const char *reason;
        int ret;
 
        ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                           image->kernel_buf_len);
-       switch (ret) {
-       case 0:
-               break;
+       if (ret) {
 
-               /* Certain verification errors are non-fatal if we're not
-                * checking errors, provided we aren't mandating that there
-                * must be a valid signature.
-                */
-       case -ENODATA:
-               reason = "kexec of unsigned image";
-               goto decide;
-       case -ENOPKG:
-               reason = "kexec of image with unsupported crypto";
-               goto decide;
-       case -ENOKEY:
-               reason = "kexec of image with unavailable key";
-       decide:
                if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
-                       pr_notice("%s rejected\n", reason);
+                       pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
                        return ret;
                }
 
-               /* If IMA is guaranteed to appraise a signature on the kexec
+               /*
+                * If IMA is guaranteed to appraise a signature on the kexec
                 * image, permit it even if the kernel is otherwise locked
                 * down.
                 */
@@ -216,17 +201,10 @@ kimage_validate_signature(struct kimage *image)
                    security_locked_down(LOCKDOWN_KEXEC))
                        return -EPERM;
 
-               return 0;
-
-               /* All other errors are fatal, including nomem, unparseable
-                * signatures and signature check failures - even if signatures
-                * aren't required.
-                */
-       default:
-               pr_notice("kernel signature verification failed (%d).\n", ret);
+               pr_debug("kernel signature verification failed (%d).\n", ret);
        }
 
-       return ret;
+       return 0;
 }
 #endif
 
index e8a1985..0c6573b 100644 (file)
@@ -2783,7 +2783,9 @@ static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
 
 void * __weak module_alloc(unsigned long size)
 {
-       return vmalloc_exec(size);
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __func__);
 }
 
 bool __weak module_init_section(const char *name)
index b03df67..cd35663 100644 (file)
@@ -531,7 +531,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
        } else if (!IS_ERR(pidfd_pid(file))) {
                err = check_setns_flags(flags);
        } else {
-               err = -EBADF;
+               err = -EINVAL;
        }
        if (err)
                goto out;
index 8c14835..b71eaf5 100644 (file)
@@ -974,16 +974,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
                user->idx = log_next_idx;
                user->seq = log_next_seq;
                break;
-       case SEEK_CUR:
-               /*
-                * It isn't supported due to the record nature of this
-                * interface: _SET _DATA and _END point to very specific
-                * record positions, while _CUR would be more useful in case
-                * of a byte-based log. Because of that, return the default
-                * errno value for invalid seek operation.
-                */
-               ret = -ESPIPE;
-               break;
        default:
                ret = -EINVAL;
        }
index dc05626..7bc3d61 100644 (file)
@@ -241,7 +241,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
        if (unlikely(ret < 0))
                goto fail;
 
-       return 0;
+       return ret;
 fail:
        memset(dst, 0, size);
        return ret;
index b8e1ca4..00867ff 100644 (file)
@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
        if (unlikely(info->add_timestamp)) {
                bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
 
-               event = rb_add_time_stamp(event, info->delta, abs);
+               event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
                length -= RB_LEN_TIME_EXTEND;
                delta = 0;
        }
index 9de29bb..fa0fc08 100644 (file)
@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
                kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
                ret = kprobe_event_gen_cmd_start(&cmd, event, val);
-               if (ret)
+               if (ret) {
+                       pr_err("Failed to generate probe: %s\n", buf);
                        break;
+               }
 
                ret = kprobe_event_gen_cmd_end(&cmd);
-               if (ret)
+               if (ret) {
                        pr_err("Failed to add probe: %s\n", buf);
+                       break;
+               }
        }
 
        return ret;
@@ -120,7 +124,7 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
 }
 #endif
 
-#ifdef CONFIG_HIST_TRIGGERS
+#ifdef CONFIG_SYNTH_EVENTS
 static int __init
 trace_boot_add_synth_event(struct xbc_node *node, const char *event)
 {
index 3a74736..f725802 100644 (file)
@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 
 int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
-       char *command, *next = buff;
+       char *command, *next;
        struct event_command *p;
        int ret = -EINVAL;
 
+       next = buff = skip_spaces(buff);
        command = strsep(&next, ": \t");
+       if (next) {
+               next = skip_spaces(next);
+               if (!*next)
+                       next = NULL;
+       }
        command = (command[0] != '!') ? command : command + 1;
 
        mutex_lock(&trigger_cmd_mutex);
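
The parsing fix is a small idiom: after strsep() cuts off the command, strip leading whitespace from the remainder and treat an all-whitespace tail as absent, so trailing spaces in the written string no longer produce a bogus empty parameter. The same idiom in portable C (skip_spaces() is a kernel string helper; a local equivalent is defined here):

    #define _GNU_SOURCE
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static char *skip_spaces(char *s)   /* local stand-in for the kernel helper */
    {
        while (isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char buf[] = "  enable_event:  foo  ";
        char *next = skip_spaces(buf);
        char *command = strsep(&next, ": \t");

        if (next) {
            next = skip_spaces(next);
            if (!*next)
                next = NULL;            /* all-whitespace tail: no params */
        }
        /* prints: command='enable_event' rest='foo  ' */
        printf("command='%s' rest='%s'\n", command, next ? next : "(none)");
        return 0;
    }
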
@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
        int ret;
 
        /* separate the trigger from the filter (t:n [if filter]) */
-       if (param && isdigit(param[0]))
+       if (param && isdigit(param[0])) {
                trigger = strsep(&param, " \t");
+               if (param) {
+                       param = skip_spaces(param);
+                       if (!*param)
+                               param = NULL;
+               }
+       }
 
        trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
        trigger = strsep(&param, " \t");
        if (!trigger)
                return -EINVAL;
+       if (param) {
+               param = skip_spaces(param);
+               if (!*param)
+                       param = NULL;
+       }
 
        system = strsep(&trigger, ":");
        if (!trigger)
index 2852828..a2a8226 100644 (file)
@@ -520,8 +520,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 err_free:
        kfree(devmem);
 err_release:
-       release_mem_region(devmem->pagemap.res.start,
-                          resource_size(&devmem->pagemap.res));
+       release_mem_region(res->start, resource_size(res));
 err:
        mutex_unlock(&mdevice->devmem_lock);
        return false;
index fd988b7..8637560 100644 (file)
@@ -2316,15 +2316,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
                .page = NULL,
        };
 
-       current->capture_control = &capc;
+       /*
+        * Make sure the structs are really initialized before we expose the
+        * capture control, in case we are interrupted and the interrupt handler
+        * frees a page.
+        */
+       barrier();
+       WRITE_ONCE(current->capture_control, &capc);
 
        ret = compact_zone(&cc, &capc);
 
        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));
 
-       *capture = capc.page;
-       current->capture_control = NULL;
+       /*
+        * Make sure we hide capture control first before we read the captured
+        * page pointer, otherwise an interrupt could free and capture a page
+        * and we would leak it.
+        */
+       WRITE_ONCE(current->capture_control, NULL);
+       *capture = READ_ONCE(capc.page);
 
        return ret;
 }
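
The pattern here: finish initializing capc, publish the pointer with WRITE_ONCE(), and on the way out hide the pointer before reading the captured page, so an interrupt on the same CPU can never store into a structure we have already stopped looking at. A loose userspace analogue with C11 atomics in place of WRITE_ONCE/READ_ONCE (names illustrative; the kernel version relies on compiler barriers and same-CPU interrupt ordering rather than C11 memory ordering):

    #include <stdatomic.h>
    #include <stddef.h>

    struct capture { void *page; };

    static _Atomic(struct capture *) capture_control;

    void *run_with_capture(struct capture *capc, void *(*work)(void))
    {
        capc->page = NULL;           /* fully initialize before publishing */

        /* Publish: an async handler may now store into *capc. */
        atomic_store_explicit(&capture_control, capc, memory_order_release);

        void *ret = work();

        /* Hide first, then read, so nothing lands in *capc afterwards. */
        atomic_store_explicit(&capture_control, NULL, memory_order_release);
        void *page = capc->page;     /* READ_ONCE(capc.page) in the kernel */

        (void)page;
        return ret;
    }
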
index e456230..61ab16f 100644 (file)
@@ -246,13 +246,13 @@ static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
 static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
                                   unsigned long vaddr)
 {
-       pte_t pte = READ_ONCE(*ptep);
+       pte_t pte = ptep_get(ptep);
 
        pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
        set_pte_at(mm, vaddr, ptep, pte);
        barrier();
        pte_clear(mm, vaddr, ptep);
-       pte = READ_ONCE(*ptep);
+       pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));
 }
 
index 0b38b6a..1962232 100644 (file)
@@ -2772,8 +2772,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
                return;
 
        cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-       if (!cw)
+       if (!cw) {
+               css_put(&memcg->css);
                return;
+       }
 
        cw->memcg = memcg;
        cw->cachep = cachep;
@@ -6360,11 +6362,16 @@ static unsigned long effective_protection(unsigned long usage,
         * We're using unprotected memory for the weight so that if
         * some cgroups DO claim explicit protection, we don't protect
         * the same bytes twice.
+        *
+        * Check both usage and parent_usage against the respective
+        * protected values. One should imply the other, but they
+        * aren't read atomically - make sure the division is sane.
         */
        if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
                return ep;
-
-       if (parent_effective > siblings_protected && usage > protected) {
+       if (parent_effective > siblings_protected &&
+           parent_usage > siblings_protected &&
+           usage > protected) {
                unsigned long unclaimed;
 
                unclaimed = parent_effective - siblings_protected;
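
The guard matters because the counters feeding this proportional split are read without synchronization. Suppose parent_effective is 8G and siblings_protected is 5G, so 3G of unclaimed protection is to be handed out in proportion to each child's unprotected usage; if parent_usage is racily observed as 4G, the unsigned denominator parent_usage - siblings_protected wraps to an enormous value instead of failing loudly. A two-line demonstration of the wrap:

    #include <stdio.h>

    int main(void)
    {
        unsigned long parent_usage       = 4UL << 30;  /* racy snapshot */
        unsigned long siblings_protected = 5UL << 30;

        /* Wraps to ~2^64 instead of a meaningful denominator. */
        printf("%lu\n", parent_usage - siblings_protected);
        return 0;
    }

Checking parent_usage > siblings_protected (alongside usage > protected for the numerator) keeps both subtractions positive before the division.
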
@@ -6416,7 +6423,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 
        if (parent == root) {
                memcg->memory.emin = READ_ONCE(memcg->memory.min);
-               memcg->memory.elow = memcg->memory.low;
+               memcg->memory.elow = READ_ONCE(memcg->memory.low);
                goto out;
        }
 
@@ -6428,7 +6435,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                        atomic_long_read(&parent->memory.children_min_usage)));
 
        WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
-                       memcg->memory.low, READ_ONCE(parent->memory.elow),
+                       READ_ONCE(memcg->memory.low),
+                       READ_ONCE(parent->memory.elow),
                        atomic_long_read(&parent->memory.children_low_usage)));
 
 out:
index dc7f354..87ec87c 100644 (file)
@@ -1498,7 +1498,7 @@ out:
 }
 
 #ifdef pte_index
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
                        unsigned long addr, struct page *page, pgprot_t prot)
 {
        int err;
@@ -1506,8 +1506,9 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
        if (!page_count(page))
                return -EINVAL;
        err = validate_page_before_insert(page);
-       return err ? err : insert_page_into_pte_locked(
-               mm, pte_offset_map(pmd, addr), addr, page, prot);
+       if (err)
+               return err;
+       return insert_page_into_pte_locked(mm, pte, addr, page, prot);
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
@@ -1517,7 +1518,8 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num, pgprot_t prot)
 {
        pmd_t *pmd = NULL;
-       spinlock_t *pte_lock = NULL;
+       pte_t *start_pte, *pte;
+       spinlock_t *pte_lock;
        struct mm_struct *const mm = vma->vm_mm;
        unsigned long curr_page_idx = 0;
        unsigned long remaining_pages_total = *num;
@@ -1536,18 +1538,17 @@ more:
        ret = -ENOMEM;
        if (pte_alloc(mm, pmd))
                goto out;
-       pte_lock = pte_lockptr(mm, pmd);
 
        while (pages_to_write_in_pmd) {
                int pte_idx = 0;
                const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
 
-               spin_lock(pte_lock);
-               for (; pte_idx < batch_size; ++pte_idx) {
-                       int err = insert_page_in_batch_locked(mm, pmd,
+               start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+               for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+                       int err = insert_page_in_batch_locked(mm, pte,
                                addr, pages[curr_page_idx], prot);
                        if (unlikely(err)) {
-                               spin_unlock(pte_lock);
+                               pte_unmap_unlock(start_pte, pte_lock);
                                ret = err;
                                remaining_pages_total -= pte_idx;
                                goto out;
@@ -1555,7 +1556,7 @@ more:
                        addr += PAGE_SIZE;
                        ++curr_page_idx;
                }
-               spin_unlock(pte_lock);
+               pte_unmap_unlock(start_pte, pte_lock);
                pages_to_write_in_pmd -= batch_size;
                remaining_pages_total -= batch_size;
        }
@@ -3140,8 +3141,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                err = mem_cgroup_charge(page, vma->vm_mm,
                                                        GFP_KERNEL);
                                ClearPageSwapCache(page);
-                               if (err)
+                               if (err) {
+                                       ret = VM_FAULT_OOM;
                                        goto out_page;
+                               }
+
+                               /*
+                                * XXX: Move to lru_cache_add() when it
+                                * supports new vs putback
+                                */
+                               spin_lock_irq(&page_pgdat(page)->lru_lock);
+                               lru_note_cost_page(page);
+                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
index 9b34e03..da374cd 100644 (file)
@@ -471,11 +471,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
                                      unsigned long start_pfn,
                                      unsigned long nr_pages)
 {
+       const unsigned long end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
-       unsigned long flags;
+       unsigned long pfn, cur_nr_pages, flags;
 
        /* Poison struct pages because they are now uninitialized again. */
-       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+       for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+               cond_resched();
+
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages =
+                       min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+               page_init_poison(pfn_to_page(pfn),
+                                sizeof(struct page) * cur_nr_pages);
+       }
 
 #ifdef CONFIG_ZONE_DEVICE
        /*
index cdcad5d..f32a690 100644 (file)
@@ -291,23 +291,6 @@ void *vzalloc_node(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- *     vmalloc_exec  -  allocate virtually contiguous, executable memory
- *     @size:          allocation size
- *
- *     Kernel-internal function to allocate enough pages to cover @size
- *     the page level allocator and map them into contiguous and
- *     executable kernel virtual space.
- *
- *     For tight control over page level allocator and protection flags
- *     use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
-}
-
-/**
  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
  *     @size:          allocation size
  *
index 207c83e..74f7e09 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        int ret;
@@ -388,7 +388,7 @@ out:
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
 
index 9e72ba2..37d48a5 100644 (file)
@@ -1726,7 +1726,7 @@ void kzfree(const void *p)
        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
-       memset(mem, 0, ks);
+       memzero_explicit(mem, ks);
        kfree(mem);
 }
 EXPORT_SYMBOL(kzfree);
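
A memset() on a buffer that is freed immediately afterwards is a dead store the optimizer may legally delete, which is exactly wrong for sensitive data; memzero_explicit() is the kernel's variant that cannot be elided. A userspace sketch of the same guarantee, assuming GCC/Clang extended asm:

    #include <stdlib.h>
    #include <string.h>

    /* A memset the optimizer must not elide: the empty asm tells the
     * compiler the buffer's contents are "used" after the store. */
    static void explicit_memzero(void *p, size_t n)
    {
        memset(p, 0, n);
        __asm__ __volatile__("" : : "r"(p) : "memory");
    }

    void drop_secret(char *secret, size_t n)
    {
        /* memset(secret, 0, n) alone could be optimized away here. */
        explicit_memzero(secret, n);
        free(secret);
    }
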
index fe81773..ef30307 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3766,15 +3766,13 @@ error:
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                             const char *text, unsigned long *map)
+                             const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
+       unsigned long *map;
        void *p;
 
-       if (!map)
-               return;
-
        slab_err(s, page, text, s->name);
        slab_lock(page);
 
@@ -3786,6 +3784,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                        print_tracking(s, p);
                }
        }
+       put_map(map);
        slab_unlock(page);
 #endif
 }
@@ -3799,11 +3798,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
        LIST_HEAD(discard);
        struct page *page, *h;
-       unsigned long *map = NULL;
-
-#ifdef CONFIG_SLUB_DEBUG
-       map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-#endif
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
@@ -3813,16 +3807,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        list_add(&page->slab_list, &discard);
                } else {
                        list_slab_objects(s, page,
-                         "Objects remaining in %s on __kmem_cache_shutdown()",
-                         map);
+                         "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
 
-#ifdef CONFIG_SLUB_DEBUG
-       bitmap_free(map);
-#endif
-
        list_for_each_entry_safe(page, h, &discard, slab_list)
                discard_slab(s, page);
 }
index dbcab84..a82efc3 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -443,8 +443,7 @@ void mark_page_accessed(struct page *page)
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
-               if (page_is_file_lru(page))
-                       workingset_activation(page);
+               workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
index e98ff46..05889e8 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap_slots.h>
 #include <linux/huge_mm.h>
-
+#include "internal.h"
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
@@ -429,7 +429,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
index 3091c2c..5a2b55c 100644 (file)
@@ -1862,7 +1862,6 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @pages: an array of pointers to the pages to be mapped
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
- * @prot: memory protection to use. PAGE_KERNEL for regular RAM
  *
  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
  * faster than vmap so it's good.  But if you mix long-life and short-life
@@ -2696,26 +2695,6 @@ void *vzalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vzalloc_node);
 
-/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size:        allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-                       NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
index b6d8432..749d239 100644 (file)
@@ -904,6 +904,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                __delete_from_swap_cache(page, swap);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
+               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;
@@ -1884,6 +1885,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                                list_add(&page->lru, &pages_to_free);
                } else {
                        nr_moved += nr_pages;
+                       if (PageActive(page))
+                               workingset_age_nonresident(lruvec, nr_pages);
                }
        }
 
index d481ea4..50b7937 100644 (file)
  *
  *             Implementation
  *
- * For each node's file LRU lists, a counter for inactive evictions
- * and activations is maintained (node->inactive_age).
+ * For each node's LRU lists, a counter for inactive evictions and
+ * activations is maintained (node->nonresident_age).
  *
  * On eviction, a snapshot of this counter (along with some bits to
  * identify the node) is stored in the now empty page cache
@@ -213,7 +213,17 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
        *workingsetp = workingset;
 }
 
-static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
+/**
+ * workingset_age_nonresident - age non-resident entries as LRU ages
+ * @lruvec: the lruvec that was aged
+ * @nr_pages: the number of pages to count
+ *
+ * As in-memory pages are aged, non-resident pages need to be aged as
+ * well, in order for the refault distances later on to be comparable
+ * to the in-memory dimensions. This function allows reclaim and LRU
+ * operations to drive the non-resident aging along in parallel.
+ */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
 {
        /*
         * Reclaiming a cgroup means reclaiming all its children in a
@@ -227,11 +237,8 @@ static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
         * the root cgroup's, age as well.
         */
        do {
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_lruvec(memcg, pgdat);
-               atomic_long_inc(&lruvec->inactive_age);
-       } while (memcg && (memcg = parent_mem_cgroup(memcg)));
+               atomic_long_add(nr_pages, &lruvec->nonresident_age);
+       } while ((lruvec = parent_lruvec(lruvec)));
 }
 
 /**
@@ -254,12 +261,11 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       advance_inactive_age(page_memcg(page), pgdat);
-
        lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        /* XXX: target_memcg can be NULL, go through lruvec */
        memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
-       eviction = atomic_long_read(&lruvec->inactive_age);
+       eviction = atomic_long_read(&lruvec->nonresident_age);
        return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
@@ -309,20 +315,20 @@ void workingset_refault(struct page *page, void *shadow)
        if (!mem_cgroup_disabled() && !eviction_memcg)
                goto out;
        eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
-       refault = atomic_long_read(&eviction_lruvec->inactive_age);
+       refault = atomic_long_read(&eviction_lruvec->nonresident_age);
 
        /*
         * Calculate the refault distance
         *
         * The unsigned subtraction here gives an accurate distance
-        * across inactive_age overflows in most cases. There is a
+        * across nonresident_age overflows in most cases. There is a
         * special case: usually, shadow entries have a short lifetime
         * and are either refaulted or reclaimed along with the inode
         * before they get too old.  But it is not impossible for the
-        * inactive_age to lap a shadow entry in the field, which can
-        * then result in a false small refault distance, leading to a
-        * false activation should this old entry actually refault
-        * again.  However, earlier kernels used to deactivate
+        * nonresident_age to lap a shadow entry in the field, which
+        * can then result in a false small refault distance, leading
+        * to a false activation should this old entry actually
+        * refault again.  However, earlier kernels used to deactivate
         * unconditionally with *every* reclaim invocation for the
         * longest time, so the occasional inappropriate activation
         * leading to pressure on the active list is not a problem.
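
The refault distance is the number of aging events between a page's eviction and its refault. Since both values are snapshots of the same free-running counter, plain unsigned subtraction yields the right distance even across an overflow of nonresident_age:

    #include <stdio.h>

    int main(void)
    {
        /* Free-running age counter snapshots (unsigned, may overflow). */
        unsigned long eviction = (unsigned long)-10;  /* just before wrap */
        unsigned long refault  = 15;                  /* just after wrap  */

        /* Modular arithmetic gives the true distance of 25 events. */
        printf("distance = %lu\n", refault - eviction);
        return 0;
    }
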
@@ -359,7 +365,7 @@ void workingset_refault(struct page *page, void *shadow)
                goto out;
 
        SetPageActive(page);
-       advance_inactive_age(memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
 
        /* Page was active prior to eviction */
@@ -382,6 +388,7 @@ out:
 void workingset_activation(struct page *page)
 {
        struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
 
        rcu_read_lock();
        /*
@@ -394,7 +401,8 @@ void workingset_activation(struct page *page)
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
-       advance_inactive_age(memcg, page_pgdat(page));
+       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
 out:
        rcu_read_unlock();
 }
index c1b6242..5126566 100644 (file)
@@ -189,3 +189,4 @@ MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
 MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
 MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
index 24986ec..779e1eb 100644 (file)
@@ -411,10 +411,16 @@ int br_mrp_set_port_role(struct net_bridge_port *p,
        if (!mrp)
                return -EINVAL;
 
-       if (role == BR_MRP_PORT_ROLE_PRIMARY)
+       switch (role) {
+       case BR_MRP_PORT_ROLE_PRIMARY:
                rcu_assign_pointer(mrp->p_port, p);
-       else
+               break;
+       case BR_MRP_PORT_ROLE_SECONDARY:
                rcu_assign_pointer(mrp->s_port, p);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        br_mrp_port_switchdev_set_role(p, role);
 
index 7501be4..2130fe0 100644 (file)
@@ -217,8 +217,8 @@ struct net_bridge_port_group {
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct br_ip                    addr;
+       unsigned char                   eth_addr[ETH_ALEN] __aligned(2);
        unsigned char                   flags;
-       unsigned char                   eth_addr[ETH_ALEN];
 };
 
 struct net_bridge_mdb_entry {
index 7c9e92b..8e8ffac 100644 (file)
@@ -155,3 +155,4 @@ module_exit(nft_meta_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("wenxu <wenxu@ucloud.cn>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
+MODULE_DESCRIPTION("Support for bridge dedicated meta key");
index f48cf4c..deae2c9 100644 (file)
@@ -455,3 +455,4 @@ module_exit(nft_reject_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
+MODULE_DESCRIPTION("Reject packets from bridge via nftables");
index 6bc2388..90b59fc 100644 (file)
@@ -4192,10 +4192,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 
        local_bh_disable();
 
+       dev_xmit_recursion_inc();
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
                ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);
+       dev_xmit_recursion_dec();
 
        local_bh_enable();
 
@@ -9547,6 +9549,13 @@ int register_netdevice(struct net_device *dev)
                rcu_barrier();
 
                dev->reg_state = NETREG_UNREGISTERED;
+               /* We should put the kobject held in
+                * netdev_unregister_kobject(), otherwise
+                * the net device cannot be freed when the
+                * driver calls free_netdev(), because the
+                * kobject is still being held.
+                */
+               kobject_put(&dev->dev.kobj);
        }
        /*
         *      Prevent userspace races by waiting until the network
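
Editor's note: the comment above documents a reference-count leak fix; without the
kobject_put(), the final free_netdev() could never release the device after a late
registration failure. A minimal, hypothetical driver-side sketch of the path this
protects (error handling only; the fix itself is internal to register_netdevice()):

	/* Illustrative caller: on failure the driver just frees as
	 * before, but now the memory is actually released because
	 * register_netdevice() dropped the leftover kobject ref.
	 */
	err = register_netdevice(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}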
index 2ee7bc4..b09bebe 100644 (file)
@@ -1721,3 +1721,4 @@ module_exit(exit_net_drop_monitor);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
 MODULE_ALIAS_GENL_FAMILY("NET_DM");
+MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
index 0cfc35e..b739cfa 100644 (file)
@@ -372,14 +372,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 }
 EXPORT_SYMBOL(flow_indr_dev_register);
 
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+                                     void *cb_priv,
                                      struct list_head *cleanup_list)
 {
        struct flow_block_cb *this, *next;
 
        list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
-               if (this->cb == setup_cb &&
-                   this->cb_priv == cb_priv) {
+               if (this->release == release &&
+                   this->indr.cb_priv == cb_priv) {
                        list_move(&this->indr.list, cleanup_list);
                        return;
                }
@@ -397,7 +398,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
 }
 
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb)
+                             void (*release)(void *cb_priv))
 {
        struct flow_indr_dev *this, *next, *indr_dev = NULL;
        LIST_HEAD(cleanup_list);
@@ -418,7 +419,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                return;
        }
 
-       __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+       __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
        mutex_unlock(&flow_indr_block_lock);
 
        flow_block_indr_notify(&cleanup_list);
@@ -429,32 +430,37 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
 static void flow_block_indr_init(struct flow_block_cb *flow_block,
                                 struct flow_block_offload *bo,
                                 struct net_device *dev, void *data,
+                                void *cb_priv,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
 {
        flow_block->indr.binder_type = bo->binder_type;
        flow_block->indr.data = data;
+       flow_block->indr.cb_priv = cb_priv;
        flow_block->indr.dev = dev;
        flow_block->indr.cleanup = cleanup;
 }
 
-static void __flow_block_indr_binding(struct flow_block_offload *bo,
-                                     struct net_device *dev, void *data,
-                                     void (*cleanup)(struct flow_block_cb *block_cb))
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct flow_block_cb *block_cb;
 
-       list_for_each_entry(block_cb, &bo->cb_list, list) {
-               switch (bo->command) {
-               case FLOW_BLOCK_BIND:
-                       flow_block_indr_init(block_cb, bo, dev, data, cleanup);
-                       list_add(&block_cb->indr.list, &flow_block_indr_list);
-                       break;
-               case FLOW_BLOCK_UNBIND:
-                       list_del(&block_cb->indr.list);
-                       break;
-               }
-       }
+       block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
+       if (IS_ERR(block_cb))
+               goto out;
+
+       flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
+       list_add(&block_cb->indr.list, &flow_block_indr_list);
+
+out:
+       return block_cb;
 }
+EXPORT_SYMBOL(flow_indr_block_cb_alloc);
 
 int flow_indr_dev_setup_offload(struct net_device *dev,
                                enum tc_setup_type type, void *data,
@@ -465,9 +471,8 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
 
        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry(this, &flow_block_indr_dev_list, list)
-               this->cb(dev, this->cb_priv, type, bo);
+               this->cb(dev, this->cb_priv, type, bo, data, cleanup);
 
-       __flow_block_indr_binding(bo, dev, data, cleanup);
        mutex_unlock(&flow_indr_block_lock);
 
        return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
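
Editor's note: under the reworked contract above, the driver's indirect-setup
callback receives `data` and `cleanup` directly and allocates the block_cb itself,
pairing it with a `release` callback instead of a setup_cb for later cleanup. A
minimal sketch of a BIND handler; the `my_drv_*` names and private-data layout are
illustrative, only flow_indr_block_cb_alloc() and flow_block_cb_add() come from the
core API shown in this patch:

	static void my_drv_release(void *cb_priv)	/* hypothetical */
	{
		kfree(cb_priv);
	}

	static int my_drv_indr_setup(struct net_device *dev, void *cb_priv,
				     enum tc_setup_type type,
				     struct flow_block_offload *bo, void *data,
				     void (*cleanup)(struct flow_block_cb *block_cb))
	{
		struct flow_block_cb *block_cb;

		if (type != TC_SETUP_BLOCK || bo->command != FLOW_BLOCK_BIND)
			return -EOPNOTSUPP;

		/* my_drv_setup_cb is the driver's flow_setup_cb_t (assumed) */
		block_cb = flow_indr_block_cb_alloc(my_drv_setup_cb, dev, cb_priv,
						    my_drv_release, bo, dev, data,
						    cb_priv, cleanup);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, bo);
		return 0;
	}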
index 6c4acf1..d832c65 100644 (file)
@@ -718,7 +718,7 @@ bool sk_mc_loop(struct sock *sk)
                return inet6_sk(sk)->mc_loop;
 #endif
        }
-       WARN_ON(1);
+       WARN_ON_ONCE(1);
        return true;
 }
 EXPORT_SYMBOL(sk_mc_loop);
@@ -1767,6 +1767,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                cgroup_sk_alloc(&sk->sk_cgrp_data);
                sock_update_classid(&sk->sk_cgrp_data);
                sock_update_netprioidx(&sk->sk_cgrp_data);
+               sk_tx_queue_clear(sk);
        }
 
        return sk;
@@ -1990,6 +1991,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
+               sk_tx_queue_clear(newsk);
                RCU_INIT_POINTER(newsk->sk_wq, NULL);
 
                if (newsk->sk_prot->sockets_allocated)
index 90f44f3..3c45f99 100644 (file)
@@ -462,6 +462,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
        xdpf->len = totsize - metasize;
        xdpf->headroom = 0;
        xdpf->metasize = metasize;
+       xdpf->frame_sz = PAGE_SIZE;
        xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
 
        xsk_buff_free(xdp);
index e8eaa80..d6200ff 100644 (file)
 #define DSA_HLEN       4
 #define EDSA_HLEN      8
 
+#define FRAME_TYPE_TO_CPU      0x00
+#define FRAME_TYPE_FORWARD     0x03
+
+#define TO_CPU_CODE_MGMT_TRAP          0x00
+#define TO_CPU_CODE_FRAME2REG          0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP      0x02
+#define TO_CPU_CODE_POLICY_TRAP                0x03
+#define TO_CPU_CODE_ARP_MIRROR         0x04
+#define TO_CPU_CODE_POLICY_MIRROR      0x05
+
 static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                                struct packet_type *pt)
 {
        u8 *edsa_header;
+       int frame_type;
+       int code;
        int source_device;
        int source_port;
 
@@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        /*
         * Check that frame type is either TO_CPU or FORWARD.
         */
-       if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+       frame_type = edsa_header[0] >> 6;
+
+       switch (frame_type) {
+       case FRAME_TYPE_TO_CPU:
+               code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+               /*
+                * Mark the frame to never egress on any port of the same switch
+                * unless it's a trapped IGMP/MLD packet, in which case the
+                * bridge might want to forward it.
+                */
+               if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+                       skb->offload_fwd_mark = 1;
+
+               break;
+
+       case FRAME_TYPE_FORWARD:
+               skb->offload_fwd_mark = 1;
+               break;
+
+       default:
                return NULL;
+       }
 
        /*
         * Determine source device and port.
@@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                        2 * ETH_ALEN);
        }
 
-       skb->offload_fwd_mark = 1;
-
        return skb;
 }
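
Editor's note: a worked decode of the bit arithmetic above (header bytes are
illustrative):

	/* edsa_header = { 0x00, 0x02, 0x10, ... }
	 *
	 *   frame_type = 0x00 >> 6                        = FRAME_TYPE_TO_CPU
	 *   code       = (0x02 & 0x6) | ((0x10 >> 4) & 1)
	 *              = 0x2 | 0x1                         = TO_CPU_CODE_POLICY_TRAP
	 *
	 * A policy trap is not an IGMP/MLD trap, so offload_fwd_mark is
	 * set and the frame will not be flooded back out of the switch.
	 */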
 
index 7b7a045..7194956 100644 (file)
@@ -234,6 +234,14 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
        int ret;
 
+       cfg->first = 100;
+       cfg->step = 100;
+       cfg->last = MAX_CABLE_LENGTH_CM;
+       cfg->pair = PHY_PAIR_ALL;
+
+       if (!nest)
+               return 0;
+
        ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
                               cable_test_tdr_act_cfg_policy, info->extack);
        if (ret < 0)
@@ -242,17 +250,12 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST])
                cfg->first = nla_get_u32(
                        tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]);
-       else
-               cfg->first = 100;
+
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST])
                cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]);
-       else
-               cfg->last = MAX_CABLE_LENGTH_CM;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP])
                cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]);
-       else
-               cfg->step = 100;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) {
                cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]);
@@ -263,8 +266,6 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
                                "invalid pair parameter");
                        return -EINVAL;
                }
-       } else {
-               cfg->pair = PHY_PAIR_ALL;
        }
 
        if (cfg->first > MAX_CABLE_LENGTH_CM) {
index 423e640..aaecfc9 100644 (file)
@@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
        [NETIF_F_GSO_PARTIAL_BIT] =      "tx-gso-partial",
+       [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
        [NETIF_F_GSO_SCTP_BIT] =         "tx-sctp-segmentation",
        [NETIF_F_GSO_ESP_BIT] =          "tx-esp-segmentation",
        [NETIF_F_GSO_UDP_L4_BIT] =       "tx-udp-segmentation",
+       [NETIF_F_GSO_FRAGLIST_BIT] =     "tx-gso-list",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CRC_BIT] =        "tx-checksum-sctp",
index b5df90c..21d5fc0 100644 (file)
@@ -2978,7 +2978,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
                               sizeof(match->mask.ipv6.dst));
                }
                if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
-                   memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+                   memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
                        match->dissector.used_keys |=
                                BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                        match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
index 7f47ba8..afe5ac8 100644 (file)
@@ -78,19 +78,18 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
 
        ret = linkstate_get_sqi(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi = ret;
 
        ret = linkstate_get_sqi_max(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi_max = ret;
 
+       ret = 0;
+out:
        ethnl_ops_complete(dev);
-
-       return 0;
+       return ret;
 }
 
 static int linkstate_reply_size(const struct ethnl_req_info *req_base,
index cd99f54..478852e 100644 (file)
@@ -339,7 +339,7 @@ static void hsr_announce(struct timer_list *t)
        rcu_read_unlock();
 }
 
-static void hsr_del_ports(struct hsr_priv *hsr)
+void hsr_del_ports(struct hsr_priv *hsr)
 {
        struct hsr_port *port;
 
@@ -356,31 +356,12 @@ static void hsr_del_ports(struct hsr_priv *hsr)
                hsr_del_port(port);
 }
 
-/* This has to be called after all the readers are gone.
- * Otherwise we would have to check the return value of
- * hsr_port_get_hsr().
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
-{
-       struct hsr_priv *hsr = netdev_priv(hsr_dev);
-
-       hsr_debugfs_term(hsr);
-       hsr_del_ports(hsr);
-
-       del_timer_sync(&hsr->prune_timer);
-       del_timer_sync(&hsr->announce_timer);
-
-       hsr_del_self_node(hsr);
-       hsr_del_nodes(&hsr->node_db);
-}
-
 static const struct net_device_ops hsr_device_ops = {
        .ndo_change_mtu = hsr_dev_change_mtu,
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
        .ndo_fix_features = hsr_fix_features,
-       .ndo_uninit = hsr_dev_destroy,
 };
 
 static struct device_type hsr_type = {
index a099d7d..b8f9262 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/netdevice.h>
 #include "hsr_main.h"
 
+void hsr_del_ports(struct hsr_priv *hsr);
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
@@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
-
 #endif /* __HSR_DEVICE_H */
index e2564de..144da15 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <net/rtnetlink.h>
 #include <linux/rculist.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
@@ -100,8 +101,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
                        hsr_del_port(port);
                        if (hsr_slave_empty(master->hsr)) {
-                               unregister_netdevice_queue(master->dev,
-                                                          &list_kill);
+                               const struct rtnl_link_ops *ops;
+
+                               ops = master->dev->rtnl_link_ops;
+                               ops->dellink(master->dev, &list_kill);
                                unregister_netdevice_many(&list_kill);
                        }
                }
@@ -144,9 +147,9 @@ static int __init hsr_init(void)
 
 static void __exit hsr_exit(void)
 {
-       unregister_netdevice_notifier(&hsr_nb);
        hsr_netlink_exit();
        hsr_debugfs_remove_root();
+       unregister_netdevice_notifier(&hsr_nb);
 }
 
 module_init(hsr_init);
index 1decb25..6e14b7d 100644 (file)
@@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
 }
 
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct hsr_priv *hsr = netdev_priv(dev);
+
+       del_timer_sync(&hsr->prune_timer);
+       del_timer_sync(&hsr->announce_timer);
+
+       hsr_debugfs_term(hsr);
+       hsr_del_ports(hsr);
+
+       hsr_del_self_node(hsr);
+       hsr_del_nodes(&hsr->node_db);
+
+       unregister_netdevice_queue(dev, head);
+}
+
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct hsr_priv *hsr = netdev_priv(dev);
@@ -118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
        .priv_size      = sizeof(struct hsr_priv),
        .setup          = hsr_dev_setup,
        .newlink        = hsr_newlink,
+       .dellink        = hsr_dellink,
        .fill_info      = hsr_fill_info,
 };
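
Editor's note: taken together, the hsr hunks above funnel both teardown paths
through one function instead of .ndo_uninit. A sketch of the resulting call
graph (illustrative):

	/* ip link del hsr0:
	 *   rtnl_dellink() -> hsr_link_ops.dellink() == hsr_dellink()
	 *
	 * last slave unregisters (NETDEV_UNREGISTER):
	 *   hsr_netdev_notify() -> ops->dellink() == hsr_dellink()
	 *
	 * hsr_dellink() stops the timers, removes debugfs and the node
	 * tables, then queues the master for unregistration, so teardown
	 * runs exactly once regardless of which path fires first.
	 */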
 
index 6ecbb0c..e64e59b 100644 (file)
@@ -340,29 +340,31 @@ config NET_FOU_IP_TUNNELS
 
 config INET_AH
        tristate "IP: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET_ESP
        tristate "IP: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
index d14133e..5bda5ae 100644 (file)
@@ -361,3 +361,4 @@ module_exit(esp4_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
index e53871e..1f75dc6 100644 (file)
@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
                if (fl4.flowi4_scope < RT_SCOPE_LINK)
                        fl4.flowi4_scope = RT_SCOPE_LINK;
 
-               if (table)
+               if (table && table != RT_TABLE_MAIN)
                        tbl = fib_get_table(net, table);
 
                if (tbl)
index dcc79ff..abd0834 100644 (file)
@@ -1304,3 +1304,4 @@ module_init(fou_init);
 module_exit(fou_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP");
index f4f1d11..0c1f364 100644 (file)
@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   __be32 remote, __be32 local,
                                   __be32 key)
 {
-       unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;
+       struct net_device *ndev;
+       unsigned int hash;
 
        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];
@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
-               return netdev_priv(itn->fb_tunnel_dev);
+       ndev = READ_ONCE(itn->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -1259,9 +1261,9 @@ void ip_tunnel_uninit(struct net_device *dev)
        struct ip_tunnel_net *itn;
 
        itn = net_generic(net, tunnel->ip_tnl_net_id);
-       /* fb_tunnel_dev will be unregisted in net-exit call. */
-       if (itn->fb_tunnel_dev != dev)
-               ip_tunnel_del(itn, netdev_priv(dev));
+       ip_tunnel_del(itn, netdev_priv(dev));
+       if (itn->fb_tunnel_dev == dev)
+               WRITE_ONCE(itn->fb_tunnel_dev, NULL);
 
        dst_cache_reset(&tunnel->dst_cache);
 }
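
Editor's note: the lookup runs under RCU and previously could dereference
itn->fb_tunnel_dev while ip_tunnel_uninit() was tearing the fallback device
down. The pattern, condensed from the two hunks above:

	/* writer (uninit):                     reader (lookup, RCU):
	 *
	 *   ip_tunnel_del(itn, ...);             ndev = READ_ONCE(itn->fb_tunnel_dev);
	 *   WRITE_ONCE(itn->fb_tunnel_dev,       if (ndev && ndev->flags & IFF_UP)
	 *              NULL);                            return netdev_priv(ndev);
	 *
	 * READ_ONCE()/WRITE_ONCE() keep the compiler from reloading or
	 * tearing the pointer, and clearing it before teardown means the
	 * reader can no longer hand out a tunnel that is mid-destruction.
	 */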
index c2670ea..5bf9fa0 100644 (file)
@@ -1797,11 +1797,22 @@ out_free:
        return ret;
 }
 
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                  const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ipt_unregister_table(net, table);
+}
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ipt_unregister_table_pre_exit(net, table, ops);
        __ipt_unregister_table(net, table);
 }
 
@@ -1958,6 +1969,8 @@ static void __exit ip_tables_fini(void)
 
 EXPORT_SYMBOL(ipt_register_table);
 EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
+EXPORT_SYMBOL(ipt_unregister_table_exit);
 EXPORT_SYMBOL(ipt_do_table);
 module_init(ip_tables_init);
 module_exit(ip_tables_fini);
index 748dc3c..f2984c7 100644 (file)
@@ -118,3 +118,4 @@ module_exit(synproxy_tg4_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
index 9d54b40..8f7bc1e 100644 (file)
@@ -72,16 +72,24 @@ static int __net_init iptable_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit iptable_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_filter)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter,
+                                             filter_ops);
+}
+
 static void __net_exit iptable_filter_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_filter)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_filter);
        net->ipv4.iptable_filter = NULL;
 }
 
 static struct pernet_operations iptable_filter_net_ops = {
        .init = iptable_filter_net_init,
+       .pre_exit = iptable_filter_net_pre_exit,
        .exit = iptable_filter_net_exit,
 };
 
index bb9266e..f703a71 100644 (file)
@@ -100,15 +100,23 @@ static int __net_init iptable_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_mangle)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle,
+                                             mangle_ops);
+}
+
 static void __net_exit iptable_mangle_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_mangle)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_mangle);
        net->ipv4.iptable_mangle = NULL;
 }
 
 static struct pernet_operations iptable_mangle_net_ops = {
+       .pre_exit = iptable_mangle_net_pre_exit,
        .exit = iptable_mangle_net_exit,
 };
 
index ad33687..b0143b1 100644 (file)
@@ -113,16 +113,22 @@ static int __net_init iptable_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.nat_table)
+               ipt_nat_unregister_lookups(net);
+}
+
 static void __net_exit iptable_nat_net_exit(struct net *net)
 {
        if (!net->ipv4.nat_table)
                return;
-       ipt_nat_unregister_lookups(net);
-       ipt_unregister_table(net, net->ipv4.nat_table, NULL);
+       ipt_unregister_table_exit(net, net->ipv4.nat_table);
        net->ipv4.nat_table = NULL;
 }
 
 static struct pernet_operations iptable_nat_net_ops = {
+       .pre_exit = iptable_nat_net_pre_exit,
        .exit   = iptable_nat_net_exit,
 };
 
index 69697eb..9abfe6b 100644 (file)
@@ -67,15 +67,23 @@ static int __net_init iptable_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_raw)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw,
+                                             rawtable_ops);
+}
+
 static void __net_exit iptable_raw_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_raw)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_raw);
        net->ipv4.iptable_raw = NULL;
 }
 
 static struct pernet_operations iptable_raw_net_ops = {
+       .pre_exit = iptable_raw_net_pre_exit,
        .exit = iptable_raw_net_exit,
 };
 
index ac633c1..415c197 100644 (file)
@@ -62,16 +62,23 @@ static int __net_init iptable_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_security)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security,
+                                             sectbl_ops);
+}
+
 static void __net_exit iptable_security_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_security)
                return;
-
-       ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_security);
        net->ipv4.iptable_security = NULL;
 }
 
 static struct pernet_operations iptable_security_net_ops = {
+       .pre_exit = iptable_security_net_pre_exit,
        .exit = iptable_security_net_exit,
 };
 
index e32e41b..aba65fe 100644 (file)
@@ -34,3 +34,4 @@ module_exit(nf_flow_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_DESCRIPTION("Netfilter flow table support");
index abf89b9..bcdb37f 100644 (file)
@@ -107,3 +107,4 @@ module_exit(nft_dup_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
index ce29411..03df986 100644 (file)
@@ -210,3 +210,4 @@ module_exit(nft_fib4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
+MODULE_DESCRIPTION("nftables fib / ip route lookup support");
index 7e6fd5c..e408f81 100644 (file)
@@ -71,3 +71,4 @@ module_exit(nft_reject_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
+MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
index 8f8eefd..c7bf5b2 100644 (file)
@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
index 12fda8f..f3a0eb1 100644 (file)
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
                 * cwnd may be very low (even just 1 packet), so we should ACK
                 * immediately.
                 */
-               inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+               if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+                       inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
        }
 }
 
@@ -3665,6 +3666,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                tcp_in_ack_event(sk, ack_ev_flags);
        }
 
+       /* This is a deviation from RFC3168 since it states that:
+        * "When the TCP data sender is ready to set the CWR bit after reducing
+        * the congestion window, it SHOULD set the CWR bit only on the first
+        * new data packet that it transmits."
+        * We accept CWR on pure ACKs to be more robust
+        * with widely-deployed TCP implementations that do this.
+        */
+       tcp_ecn_accept_cwr(sk, skb);
+
        /* We passed data and got it acked, remove any soft error
         * log. Something worked...
         */
@@ -4800,8 +4810,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
-       tcp_ecn_accept_cwr(sk, skb);
-
        tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
index 992cf45..f4f19e8 100644 (file)
@@ -49,29 +49,31 @@ config IPV6_OPTIMISTIC_DAD
 
 config INET6_AH
        tristate "IPv6: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET6_ESP
        tristate "IPv6: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
index 55addea..1ca516f 100644 (file)
@@ -395,3 +395,4 @@ module_exit(esp6_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
index 091f941..430518a 100644 (file)
@@ -224,3 +224,4 @@ module_init(fou6_init);
 module_exit(fou6_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP (IPv6)");
index 257d2b6..36c58aa 100644 (file)
@@ -120,3 +120,4 @@ module_init(ila_init);
 module_exit(ila_fini);
 MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)");
index 781ca8c..6532bde 100644 (file)
@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                        gre_proto == htons(ETH_P_ERSPAN2)) ?
                       ARPHRD_ETHER : ARPHRD_IP6GRE;
        int score, cand_score = 4;
+       struct net_device *ndev;
 
        for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
                if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       dev = ign->fb_tunnel_dev;
-       if (dev && dev->flags & IFF_UP)
-               return netdev_priv(dev);
+       ndev = READ_ONCE(ign->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
+       if (ign->fb_tunnel_dev == dev)
+               WRITE_ONCE(ign->fb_tunnel_dev, NULL);
        dst_cache_reset(&t->dst_cache);
        dev_put(dev);
 }
index e273934..e96a431 100644 (file)
@@ -1807,11 +1807,22 @@ out_free:
        return ret;
 }
 
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ip6t_unregister_table(net, table);
+}
+
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ip6t_unregister_table_pre_exit(net, table, ops);
        __ip6t_unregister_table(net, table);
 }
 
@@ -1969,6 +1980,8 @@ static void __exit ip6_tables_fini(void)
 
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
+EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
+EXPORT_SYMBOL(ip6t_unregister_table_exit);
 EXPORT_SYMBOL(ip6t_do_table);
 
 module_init(ip6_tables_init);
index fd1f52a..d51d0c3 100644 (file)
@@ -121,3 +121,4 @@ module_exit(synproxy_tg6_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
index 32667f5..88337b5 100644 (file)
@@ -73,16 +73,24 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_filter)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter,
+                                              filter_ops);
+}
+
 static void __net_exit ip6table_filter_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_filter)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter);
        net->ipv6.ip6table_filter = NULL;
 }
 
 static struct pernet_operations ip6table_filter_net_ops = {
        .init = ip6table_filter_net_init,
+       .pre_exit = ip6table_filter_net_pre_exit,
        .exit = ip6table_filter_net_exit,
 };
 
index 070afb9..1a27486 100644 (file)
@@ -93,16 +93,24 @@ static int __net_init ip6table_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_mangle)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle,
+                                              mangle_ops);
+}
+
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_mangle)
                return;
 
-       ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle);
        net->ipv6.ip6table_mangle = NULL;
 }
 
 static struct pernet_operations ip6table_mangle_net_ops = {
+       .pre_exit = ip6table_mangle_net_pre_exit,
        .exit = ip6table_mangle_net_exit,
 };
 
index 0f48759..0a23265 100644 (file)
@@ -114,16 +114,22 @@ static int __net_init ip6table_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_nat)
+               ip6t_nat_unregister_lookups(net);
+}
+
 static void __net_exit ip6table_nat_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_nat)
                return;
-       ip6t_nat_unregister_lookups(net);
-       ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat);
        net->ipv6.ip6table_nat = NULL;
 }
 
 static struct pernet_operations ip6table_nat_net_ops = {
+       .pre_exit = ip6table_nat_net_pre_exit,
        .exit   = ip6table_nat_net_exit,
 };
 
index a22100b..8f9e742 100644 (file)
@@ -66,15 +66,23 @@ static int __net_init ip6table_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_raw)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw,
+                                              rawtable_ops);
+}
+
 static void __net_exit ip6table_raw_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_raw)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw);
        net->ipv6.ip6table_raw = NULL;
 }
 
 static struct pernet_operations ip6table_raw_net_ops = {
+       .pre_exit = ip6table_raw_net_pre_exit,
        .exit = ip6table_raw_net_exit,
 };
 
index a74335f..5e8c48f 100644 (file)
@@ -61,15 +61,23 @@ static int __net_init ip6table_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_security)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security,
+                                              sectbl_ops);
+}
+
 static void __net_exit ip6table_security_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_security)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_security);
        net->ipv6.ip6table_security = NULL;
 }
 
 static struct pernet_operations ip6table_security_net_ops = {
+       .pre_exit = ip6table_security_net_pre_exit,
        .exit = ip6table_security_net_exit,
 };
 
index a8566ee..667b8af 100644 (file)
@@ -35,3 +35,4 @@ module_exit(nf_flow_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
+MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
index 2af3220..8b5193e 100644 (file)
@@ -105,3 +105,4 @@ module_exit(nft_dup_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
+MODULE_DESCRIPTION("IPv6 nftables packet duplication support");
index 7ece86a..e204163 100644 (file)
@@ -255,3 +255,4 @@ module_exit(nft_fib6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(10, "fib");
+MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support");
index 680a28c..c1098a1 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nft_reject_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
+MODULE_DESCRIPTION("IPv6 packet rejection for nftables");
index 490b925..df9a514 100644 (file)
@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
         */
        subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
        if (subflow->request_mptcp) {
-               pr_debug("local_key=%llu", subflow->local_key);
                opts->suboptions = OPTION_MPTCP_MPC_SYN;
-               opts->sndr_key = subflow->local_key;
                *size = TCPOLEN_MPTCP_MPC_SYN;
                return true;
        } else if (subflow->request_join) {
index db56535..c6eeaf3 100644 (file)
@@ -249,6 +249,7 @@ struct mptcp_subflow_request_sock {
        u64     thmac;
        u32     local_nonce;
        u32     remote_nonce;
+       struct mptcp_sock       *msk;
 };
 
 static inline struct mptcp_subflow_request_sock *
index bbdb74b..3838a0b 100644 (file)
@@ -69,6 +69,9 @@ static void subflow_req_destructor(struct request_sock *req)
 
        pr_debug("subflow_req=%p", subflow_req);
 
+       if (subflow_req->msk)
+               sock_put((struct sock *)subflow_req->msk);
+
        if (subflow_req->mp_capable)
                mptcp_token_destroy_request(subflow_req->token);
        tcp_request_sock_ops.destructor(req);
@@ -86,8 +89,8 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
 }
 
 /* validate received token and create truncated hmac and nonce for SYN-ACK */
-static bool subflow_token_join_request(struct request_sock *req,
-                                      const struct sk_buff *skb)
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+                                                    const struct sk_buff *skb)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        u8 hmac[SHA256_DIGEST_SIZE];
@@ -97,13 +100,13 @@ static bool subflow_token_join_request(struct request_sock *req,
        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
-               return false;
+               return NULL;
        }
 
        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
-               return false;
+               return NULL;
        }
        subflow_req->local_id = local_id;
 
@@ -114,9 +117,7 @@ static bool subflow_token_join_request(struct request_sock *req,
                              subflow_req->remote_nonce, hmac);
 
        subflow_req->thmac = get_unaligned_be64(hmac);
-
-       sock_put((struct sock *)msk);
-       return true;
+       return msk;
 }
 
 static void subflow_init_req(struct request_sock *req,
@@ -133,6 +134,7 @@ static void subflow_init_req(struct request_sock *req,
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
+       subflow_req->msk = NULL;
 
 #ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -166,12 +168,9 @@ static void subflow_init_req(struct request_sock *req,
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
-               pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
-                        subflow_req->remote_nonce);
-               if (!subflow_token_join_request(req, skb)) {
-                       subflow_req->mp_join = 0;
-                       // @@ need to trigger RST
-               }
+               subflow_req->msk = subflow_token_join_request(req, skb);
+               pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+                        subflow_req->remote_nonce, subflow_req->msk);
        }
 }
 
@@ -354,10 +353,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
-       bool ret;
 
        subflow_req = mptcp_subflow_rsk(req);
-       msk = mptcp_token_get_sock(subflow_req->token);
+       msk = subflow_req->msk;
        if (!msk)
                return false;
 
@@ -365,12 +363,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);
 
-       ret = true;
-       if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
-               ret = false;
-
-       sock_put((struct sock *)msk);
-       return ret;
+       return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
 }
 
 static void mptcp_sock_destruct(struct sock *sk)
@@ -438,22 +431,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
-       bool fallback_is_fatal = false;
+       bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
-       bool fallback = false;
        struct sock *child;
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
-       /* we need later a valid 'mp_capable' value even when options are not
-        * parsed
+       /* After child creation we must look for 'mp_capable' even when options
+        * are not parsed
         */
        mp_opt.mp_capable = 0;
-       if (tcp_rsk(req)->is_mptcp == 0)
+
+       /* hopefully temporary handling for MP_JOIN+syncookie */
+       subflow_req = mptcp_subflow_rsk(req);
+       fallback_is_fatal = subflow_req->mp_join;
+       fallback = !tcp_rsk(req)->is_mptcp;
+       if (fallback)
                goto create_child;
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
-       subflow_req = mptcp_subflow_rsk(req);
        if (subflow_req->mp_capable) {
                if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
                        /* here we can receive and accept an in-window,
@@ -474,12 +470,11 @@ create_msk:
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
-               fallback_is_fatal = true;
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_join ||
                    !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
-                       return NULL;
+                       fallback = true;
                }
        }
 
@@ -522,10 +517,12 @@ create_child:
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;
 
-                       owner = mptcp_token_get_sock(ctx->token);
+                       owner = subflow_req->msk;
                        if (!owner)
                                goto dispose_child;
 
+                       /* move the msk reference ownership to the subflow */
+                       subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto dispose_child;
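
Editor's note: the msk reference now follows an explicit ownership protocol
across the hunks above; summarized (a sketch of the lifetime, not new code):

	/* subflow_token_join_request()  takes the ref, stores it in
	 *                               subflow_req->msk (no sock_put)
	 * subflow_hmac_valid()          borrows subflow_req->msk
	 * subflow_syn_recv_sock()       transfers ownership to the child:
	 *                                 owner = subflow_req->msk;
	 *                                 subflow_req->msk = NULL;
	 * subflow_req_destructor()      drops the ref only if still set
	 */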
index 340cb95..56621d6 100644 (file)
@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
        for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
                if (!add_extension(id, cadt_flags, tb))
                        continue;
+               if (align < ip_set_extensions[id].align)
+                       align = ip_set_extensions[id].align;
                len = ALIGN(len, ip_set_extensions[id].align);
                set->offset[id] = len;
                set->extensions |= ip_set_extensions[id].type;
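
Editor's note: the added bookkeeping records the strictest extension alignment
seen in the loop; presumably the (truncated) remainder of the function uses it
to pad the total element length. A worked example, assuming two extensions A
(align 4, size 4) and B (align 8, size 8) starting from len = 6:

	/* len = ALIGN(6, 4)  = 8;   offset[A] = 8;   len += 4  -> 12
	 * len = ALIGN(12, 8) = 16;  offset[B] = 16;  len += 8  -> 24
	 *
	 * 'align' ends up 8, the largest seen, so the final length can
	 * be rounded to a multiple of the strictest member's alignment.
	 */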
index f108a76..2b01a15 100644 (file)
@@ -73,3 +73,4 @@ EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter packet duplication support");
index afa8517..b1eb527 100644 (file)
@@ -594,3 +594,4 @@ module_exit(nf_flow_table_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter flow table module");
index 88bedf1..bc4126d 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nf_flow_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
+MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
index 62651e6..5fff1e0 100644 (file)
@@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
+       list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
 }
index b9cbe1e..ebcdc8e 100644 (file)
@@ -1237,3 +1237,4 @@ EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 185fc82..c7cf1cd 100644 (file)
@@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
        nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
                                    basechain, &extack);
        mutex_lock(&net->nft.commit_mutex);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        nft_flow_offload_unbind(&bo, basechain);
        mutex_unlock(&net->nft.commit_mutex);
index 99127e2..5f24edf 100644 (file)
@@ -33,6 +33,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
+MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 
 #define nfnl_dereference_protected(id) \
        rcu_dereference_protected(table[(id)].subsys, \
index f9adca6..aa1a066 100644 (file)
@@ -902,3 +902,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("match");
 MODULE_ALIAS_NFT_EXPR("target");
+MODULE_DESCRIPTION("x_tables over nftables support");
index 69d6173..7d0761f 100644 (file)
@@ -280,3 +280,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso");
 MODULE_ALIAS_NFT_EXPR("connlimit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
+MODULE_DESCRIPTION("nftables connlimit rule support");
index f6d4d0f..85ed461 100644 (file)
@@ -303,3 +303,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("counter");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
+MODULE_DESCRIPTION("nftables counter rule support");
index faea72c..77258af 100644 (file)
@@ -1345,3 +1345,4 @@ MODULE_ALIAS_NFT_EXPR("notrack");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
+MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
index c2e78c1..40788b3 100644 (file)
@@ -102,3 +102,4 @@ module_exit(nft_dup_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
+MODULE_DESCRIPTION("nftables netdev packet duplication support");
index 465432e..a88d44e 100644 (file)
@@ -76,3 +76,4 @@ module_exit(nft_fib_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
+MODULE_DESCRIPTION("nftables fib inet support");
index a2e726a..3f3478a 100644 (file)
@@ -85,3 +85,4 @@ module_exit(nft_fib_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo M. Bermudo Garay <pablombg@gmail.com>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
+MODULE_DESCRIPTION("nftables netdev fib lookups support");
index b70b489..3b9b97a 100644 (file)
@@ -286,3 +286,4 @@ module_exit(nft_flow_offload_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("flow_offload");
+MODULE_DESCRIPTION("nftables hardware flow offload module");
index b836d55..96371d8 100644 (file)
@@ -248,3 +248,4 @@ module_exit(nft_hash_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("hash");
+MODULE_DESCRIPTION("Netfilter nftables hash module");
index 35b67d7..0e2c315 100644 (file)
@@ -372,3 +372,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("limit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
+MODULE_DESCRIPTION("nftables limit expression support");
index fe4831f..5789945 100644 (file)
@@ -298,3 +298,4 @@ module_exit(nft_log_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("log");
+MODULE_DESCRIPTION("Netfilter nf_tables log module");
index bc9fd98..71390b7 100644 (file)
@@ -305,3 +305,4 @@ module_exit(nft_masq_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("masq");
+MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
index 23a7bfd..4bcf33b 100644 (file)
@@ -402,3 +402,4 @@ module_exit(nft_nat_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
 MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_DESCRIPTION("Network Address Translation support");
index 48edb9d..f1fc824 100644 (file)
@@ -217,3 +217,4 @@ module_exit(nft_ng_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("numgen");
+MODULE_DESCRIPTION("nftables number generator module");
index bfd18d2..5f9207a 100644 (file)
@@ -252,3 +252,4 @@ module_exit(nft_objref_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("objref");
+MODULE_DESCRIPTION("nftables stateful object reference module");
index b42247a..c261d57 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_osf_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("osf");
+MODULE_DESCRIPTION("nftables passive OS fingerprint support");
index 5ece0a6..23265d7 100644 (file)
@@ -216,3 +216,4 @@ module_exit(nft_queue_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
 MODULE_ALIAS_NFT_EXPR("queue");
+MODULE_DESCRIPTION("Netfilter nftables queue module");
index 4413690..0363f53 100644 (file)
@@ -254,3 +254,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("quota");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
+MODULE_DESCRIPTION("Netfilter nftables quota module");
index 5b77917..2056051 100644 (file)
@@ -292,3 +292,4 @@ module_exit(nft_redir_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("redir");
+MODULE_DESCRIPTION("Netfilter nftables redirect support");
index 00f865f..86eafbb 100644 (file)
@@ -119,3 +119,4 @@ EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
index f41f414..cf8f264 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_reject_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
+MODULE_DESCRIPTION("Netfilter nftables reject inet support");
index e2c1fc6..4fda8b3 100644 (file)
@@ -388,3 +388,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("synproxy");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 30be578..d3eb953 100644 (file)
@@ -719,3 +719,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("tunnel");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
+MODULE_DESCRIPTION("nftables tunnel expression support");
index a8e5f6c..b4f7bbc 100644
@@ -244,3 +244,4 @@ MODULE_ALIAS("ipt_SNAT");
 MODULE_ALIAS("ipt_DNAT");
 MODULE_ALIAS("ip6t_SNAT");
 MODULE_ALIAS("ip6t_DNAT");
+MODULE_DESCRIPTION("SNAT and DNAT targets support");
index fc0efd8..2611657 100644
@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct nlattr *attr, bool last)
 {
+       struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
        const struct nlattr *actions, *cpl_arg;
+       int len, max_len, rem = nla_len(attr);
        const struct check_pkt_len_arg *arg;
-       int rem = nla_len(attr);
        bool clone_flow_key;
 
        /* The first netlink attribute in 'attr' is always
@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
        cpl_arg = nla_data(attr);
        arg = nla_data(cpl_arg);
 
-       if (skb->len <= arg->pkt_len) {
+       len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+       max_len = arg->pkt_len;
+
+       if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+           len <= max_len) {
                /* Second netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
                 */
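
A note on the new test above: skb->len alone under-reports fragmented packets (which are now judged by MRU plus MAC header length) and over-reports GSO aggregates (which pass when each resulting segment would fit). A minimal userspace sketch of the same decision, with a simplified packet model standing in for the real sk_buff; every field name below is an assumption of the sketch, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the skb fields the kernel code consults. */
struct pkt {
	unsigned int len;             /* total length as seen by the datapath */
	unsigned int mru;             /* MRU of a reassembled fragment, 0 if none */
	unsigned int mac_len;         /* link-layer header length */
	bool gso;                     /* packet is a GSO aggregate */
	unsigned int gso_seg_mac_len; /* largest segment incl. MAC header */
};

/* Mirrors the fixed check: fragments compare mru + mac_len, GSO packets
 * pass if every resulting segment fits, everything else compares len. */
static bool pkt_within(const struct pkt *p, unsigned int max_len)
{
	unsigned int len = p->mru ? p->mru + p->mac_len : p->len;

	return (p->gso && p->gso_seg_mac_len <= max_len) || len <= max_len;
}

int main(void)
{
	struct pkt gso = { .len = 64000, .gso = true, .gso_seg_mac_len = 1514 };

	printf("%d\n", pkt_within(&gso, 1518)); /* 1: segments fit though len is huge */
	return 0;
}
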
index 46f709a..f8001ec 100644
 #include "rds.h"
 #include "loop.h"
 
+static char * const rds_trans_modules[] = {
+       [RDS_TRANS_IB] = "rds_rdma",
+       [RDS_TRANS_GAP] = NULL,
+       [RDS_TRANS_TCP] = "rds_tcp",
+};
+
 static struct rds_transport *transports[RDS_TRANS_COUNT];
 static DECLARE_RWSEM(rds_trans_sem);
 
@@ -110,18 +116,20 @@ struct rds_transport *rds_trans_get(int t_type)
 {
        struct rds_transport *ret = NULL;
        struct rds_transport *trans;
-       unsigned int i;
 
        down_read(&rds_trans_sem);
-       for (i = 0; i < RDS_TRANS_COUNT; i++) {
-               trans = transports[i];
-
-               if (trans && trans->t_type == t_type &&
-                   (!trans->t_owner || try_module_get(trans->t_owner))) {
-                       ret = trans;
-                       break;
-               }
+       trans = transports[t_type];
+       if (!trans) {
+               up_read(&rds_trans_sem);
+               if (rds_trans_modules[t_type])
+                       request_module(rds_trans_modules[t_type]);
+               down_read(&rds_trans_sem);
+               trans = transports[t_type];
        }
+       if (trans && trans->t_type == t_type &&
+           (!trans->t_owner || try_module_get(trans->t_owner)))
+               ret = trans;
+
        up_read(&rds_trans_sem);
 
        return ret;
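
The rewrite above indexes the transport table directly and, on a miss, autoloads the backing module. The load has to happen with the read lock dropped, since the module's registration path takes the same lock for writing, and the table must be re-checked after reacquiring. A hedged pthreads sketch of that drop-load-recheck shape, where load_backend() merely stands in for request_module():

#include <pthread.h>
#include <stdio.h>

#define NTRANS 3

static void *transports[NTRANS];
static pthread_rwlock_t trans_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for request_module(): a real loader would modprobe the
 * backend, whose init routine then fills in transports[type]. */
static void load_backend(int type)
{
	printf("loading backend %d\n", type);
}

static void *trans_get(int type)
{
	void *t;

	pthread_rwlock_rdlock(&trans_lock);
	t = transports[type];
	if (!t) {
		/* Can't load while read-locked: registration needs the
		 * write side, so drop, load, and re-check. */
		pthread_rwlock_unlock(&trans_lock);
		load_backend(type);
		pthread_rwlock_rdlock(&trans_lock);
		t = transports[type];
	}
	pthread_rwlock_unlock(&trans_lock);
	return t;
}

int main(void)
{
	return trans_get(0) ? 0 : 1;
}
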
index b7611cc..032ed76 100644
 #include <net/ip.h>
 #include "ar-internal.h"
 
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+                              unsigned long user_call_ID)
+{
+}
+
 /*
  * Preallocate a single service call, connection and peer and, if possible,
  * give them a user ID and attach the user's side of the ID to them.
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
+                       if (call->notify_rx)
+                               call->notify_rx = rxrpc_dummy_notify;
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
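
Discarding a preallocated call can race with code that still invokes the notification hook, so the fix installs a do-nothing handler rather than leaving the stale one in place; callers can keep calling through the pointer without a NULL check. The no-op-callback idiom in miniature, with simplified stand-in types:

#include <stdio.h>

typedef void (*notify_fn)(void *ctx);

static void real_notify(void *ctx) { printf("notify %p\n", ctx); }
static void dummy_notify(void *ctx) { (void)ctx; } /* safe to call, does nothing */

struct call { notify_fn notify; };

int main(void)
{
	struct call c = { .notify = real_notify };

	c.notify(&c);           /* normal delivery */

	/* Teardown: don't NULL the pointer (callers don't check it);
	 * swap in a no-op so a late invocation is harmless. */
	c.notify = dummy_notify;
	c.notify(&c);           /* safe, does nothing */
	return 0;
}
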
index aa1c8ee..6be2672 100644
@@ -253,7 +253,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                 * confuse things
                 */
                annotation &= ~RXRPC_TX_ANNO_MASK;
-               annotation |= RXRPC_TX_ANNO_RESENT;
+               annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
                call->rxtx_annotations[ix] = annotation;
 
                skb = call->rxtx_buffer[ix];
index 299ac98..7675793 100644
@@ -722,13 +722,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
               rwind, ntohl(ackinfo->jumbo_max));
 
+       if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+               rwind = RXRPC_RXTX_BUFF_SIZE - 1;
        if (call->tx_winsize != rwind) {
-               if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-                       rwind = RXRPC_RXTX_BUFF_SIZE - 1;
                if (rwind > call->tx_winsize)
                        wake = true;
-               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
-                                           ntohl(ackinfo->rwind), wake);
+               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
                call->tx_winsize = rwind;
        }
 
index 9c62859..323ae7f 100644
@@ -32,7 +32,7 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
        return KTIME_MAX;
 }
 
-static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 {
        struct tcf_gate_params *param = &gact->param;
        ktime_t now, base, cycle;
@@ -43,18 +43,13 @@ static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 
        if (ktime_after(base, now)) {
                *start = base;
-               return 0;
+               return;
        }
 
        cycle = param->tcfg_cycletime;
 
-       /* cycle time should not be zero */
-       if (!cycle)
-               return -EFAULT;
-
        n = div64_u64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
-       return 0;
 }
 
 static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
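
Since a zero cycle time is now rejected at configuration time (see the cycletime check added to tcf_gate_init further down), the helper can become void: all it does is find the first cycle boundary after "now". The arithmetic in isolation, mirroring the div64_u64() computation:

#include <stdint.h>
#include <stdio.h>

/* First cycle start strictly after now; cycle must be non-zero,
 * which the configuration path now guarantees. */
static uint64_t next_cycle_start(uint64_t now, uint64_t base, uint64_t cycle)
{
	if (base > now)
		return base;
	return base + ((now - base) / cycle + 1) * cycle;
}

int main(void)
{
	/* base=100, cycle=40, now=190 -> n=2, start=100+3*40=220 */
	printf("%llu\n", (unsigned long long)next_cycle_start(190, 100, 40));
	return 0;
}
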
@@ -277,6 +272,27 @@ release_list:
        return err;
 }
 
+static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+                            enum tk_offsets tko, s32 clockid,
+                            bool do_init)
+{
+       if (!do_init) {
+               if (basetime == gact->param.tcfg_basetime &&
+                   tko == gact->tk_offset &&
+                   clockid == gact->param.tcfg_clockid)
+                       return;
+
+               spin_unlock_bh(&gact->tcf_lock);
+               hrtimer_cancel(&gact->hitimer);
+               spin_lock_bh(&gact->tcf_lock);
+       }
+       gact->param.tcfg_basetime = basetime;
+       gact->param.tcfg_clockid = clockid;
+       gact->tk_offset = tko;
+       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+       gact->hitimer.function = gate_timer_func;
+}
+
 static int tcf_gate_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
@@ -287,12 +303,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        enum tk_offsets tk_offset = TK_OFFS_TAI;
        struct nlattr *tb[TCA_GATE_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
+       u64 cycletime = 0, basetime = 0;
        struct tcf_gate_params *p;
        s32 clockid = CLOCK_TAI;
        struct tcf_gate *gact;
        struct tc_gate *parm;
        int ret = 0, err;
-       u64 basetime = 0;
        u32 gflags = 0;
        s32 prio = -1;
        ktime_t start;
@@ -308,6 +324,27 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_GATE_PARMS])
                return -EINVAL;
 
+       if (tb[TCA_GATE_CLOCKID]) {
+               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+               switch (clockid) {
+               case CLOCK_REALTIME:
+                       tk_offset = TK_OFFS_REAL;
+                       break;
+               case CLOCK_MONOTONIC:
+                       tk_offset = TK_OFFS_MAX;
+                       break;
+               case CLOCK_BOOTTIME:
+                       tk_offset = TK_OFFS_BOOT;
+                       break;
+               case CLOCK_TAI:
+                       tk_offset = TK_OFFS_TAI;
+                       break;
+               default:
+                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+                       return -EINVAL;
+               }
+       }
+
        parm = nla_data(tb[TCA_GATE_PARMS]);
        index = parm->index;
 
@@ -331,10 +368,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
-       if (ret == ACT_P_CREATED) {
-               to_gate(*a)->param.tcfg_clockid = -1;
-               INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
-       }
 
        if (tb[TCA_GATE_PRIORITY])
                prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
@@ -345,41 +378,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_GATE_FLAGS])
                gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
 
-       if (tb[TCA_GATE_CLOCKID]) {
-               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
-               switch (clockid) {
-               case CLOCK_REALTIME:
-                       tk_offset = TK_OFFS_REAL;
-                       break;
-               case CLOCK_MONOTONIC:
-                       tk_offset = TK_OFFS_MAX;
-                       break;
-               case CLOCK_BOOTTIME:
-                       tk_offset = TK_OFFS_BOOT;
-                       break;
-               case CLOCK_TAI:
-                       tk_offset = TK_OFFS_TAI;
-                       break;
-               default:
-                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
-                       goto release_idr;
-               }
-       }
+       gact = to_gate(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&gact->param.entries);
 
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
 
-       gact = to_gate(*a);
-
        spin_lock_bh(&gact->tcf_lock);
        p = &gact->param;
 
-       if (tb[TCA_GATE_CYCLE_TIME]) {
-               p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
-               if (!p->tcfg_cycletime_ext)
-                       goto chain_put;
-       }
+       if (tb[TCA_GATE_CYCLE_TIME])
+               cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
 
        if (tb[TCA_GATE_ENTRY_LIST]) {
                err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
@@ -387,35 +398,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                        goto chain_put;
        }
 
-       if (!p->tcfg_cycletime) {
+       if (!cycletime) {
                struct tcfg_gate_entry *entry;
                ktime_t cycle = 0;
 
                list_for_each_entry(entry, &p->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
-               p->tcfg_cycletime = cycle;
+               cycletime = cycle;
+               if (!cycletime) {
+                       err = -EINVAL;
+                       goto chain_put;
+               }
        }
+       p->tcfg_cycletime = cycletime;
 
        if (tb[TCA_GATE_CYCLE_TIME_EXT])
                p->tcfg_cycletime_ext =
                        nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
 
+       gate_setup_timer(gact, basetime, tk_offset, clockid,
+                        ret == ACT_P_CREATED);
        p->tcfg_priority = prio;
-       p->tcfg_basetime = basetime;
-       p->tcfg_clockid = clockid;
        p->tcfg_flags = gflags;
-
-       gact->tk_offset = tk_offset;
-       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
-       gact->hitimer.function = gate_timer_func;
-
-       err = gate_get_start_time(gact, &start);
-       if (err < 0) {
-               NL_SET_ERR_MSG(extack,
-                              "Internal error: failed get start time");
-               release_entry_list(&p->entries);
-               goto chain_put;
-       }
+       gate_get_start_time(gact, &start);
 
        gact->current_close_time = start;
        gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
@@ -443,6 +448,13 @@ chain_put:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
 release_idr:
+       /* action is not inserted in any list: it's safe to init hitimer
+        * without taking tcf_lock.
+        */
+       if (ret == ACT_P_CREATED)
+               gate_setup_timer(gact, gact->param.tcfg_basetime,
+                                gact->tk_offset, gact->param.tcfg_clockid,
+                                true);
        tcf_idr_release(*a, bind);
        return err;
 }
@@ -453,9 +465,7 @@ static void tcf_gate_cleanup(struct tc_action *a)
        struct tcf_gate_params *p;
 
        p = &gact->param;
-       if (p->tcfg_clockid != -1)
-               hrtimer_cancel(&gact->hitimer);
-
+       hrtimer_cancel(&gact->hitimer);
        release_entry_list(&p->entries);
 }
 
index a00a203..faa78b7 100644
@@ -652,6 +652,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
        down_write(&block->cb_lock);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        up_write(&block->cb_lock);
        rtnl_lock();
@@ -671,25 +672,29 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct netlink_ext_ack *extack)
 {
        struct flow_block_offload bo = {};
-       int err;
 
        tcf_block_offload_init(&bo, dev, command, ei->binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               extack);
 
-       if (dev->netdev_ops->ndo_setup_tc)
+       if (dev->netdev_ops->ndo_setup_tc) {
+               int err;
+
                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
-       else
-               err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
-                                                 &bo, tc_block_indr_cleanup);
+               if (err < 0) {
+                       if (err != -EOPNOTSUPP)
+                               NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
+                       return err;
+               }
 
-       if (err < 0) {
-               if (err != -EOPNOTSUPP)
-                       NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
-               return err;
+               return tcf_block_setup(block, &bo);
        }
 
-       return tcf_block_setup(block, &bo);
+       flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
+                                   tc_block_indr_cleanup);
+       tcf_block_setup(block, &bo);
+
+       return -EOPNOTSUPP;
 }
 
 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
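
After this change a device without ndo_setup_tc still gets the indirect offload registered, but the function deliberately reports -EOPNOTSUPP so the block is not accounted as hardware-offloaded. The control flow reduced to a sketch; setup_indirect() is only a placeholder for the flow_indr_dev_setup_offload() plus tcf_block_setup() pair:

#include <errno.h>

typedef int (*ndo_setup_fn)(void *dev, void *block);

/* Placeholder for the indirect-offload registration. */
static void setup_indirect(void *dev, void *block) { (void)dev; (void)block; }

/* Prefer the driver's own hook; otherwise register for indirect
 * offload but report the block as not offloaded in hardware. */
int offload_cmd(void *dev, ndo_setup_fn direct, void *block)
{
	if (direct) {
		int err = direct(dev, block);

		if (err < 0)
			return err; /* driver refused or failed */
		return 0;           /* offloaded */
	}

	setup_indirect(dev, block);
	return -EOPNOTSUPP;         /* registered, not offloaded */
}
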
index 60f8ae5..ca81369 100644
@@ -1551,32 +1551,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
        return idx + (tin << 16);
 }
 
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
 {
-       int wlen = skb_network_offset(skb);
+       const int offset = skb_network_offset(skb);
+       u16 *buf, buf_;
        u8 dscp;
 
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
-               wlen += sizeof(struct iphdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* ToS is in the second byte of iphdr */
+               dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct iphdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_IPV6):
-               wlen += sizeof(struct ipv6hdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* Traffic class is in the first and second bytes of ipv6hdr */
+               dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct ipv6hdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_ARP):
@@ -1593,14 +1612,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
 {
        struct cake_sched_data *q = qdisc_priv(sch);
        u32 tin, mark;
+       bool wash;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
-        * using firewall marks or skb->priority.
+        * using firewall marks or skb->priority. Call DSCP parsing early if
+        * wash is enabled; otherwise defer it until tin selection to skip unneeded parsing.
         */
-       dscp = cake_handle_diffserv(skb,
-                                   q->rate_flags & CAKE_FLAG_WASH);
        mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+       wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+       if (wash)
+               dscp = cake_handle_diffserv(skb, wash);
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
@@ -1614,6 +1636,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
 
        else {
+               if (!wash)
+                       dscp = cake_handle_diffserv(skb, wash);
                tin = q->tin_index[dscp];
 
                if (unlikely(tin >= q->tin_cnt))
@@ -2691,7 +2715,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt) {
-               int err = cake_change(sch, opt, extack);
+               err = cake_change(sch, opt, extack);
 
                if (err)
                        return err;
@@ -3008,7 +3032,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                        PUT_STAT_S32(BLUE_TIMER_US,
                                     ktime_to_us(
                                             ktime_sub(now,
-                                                    flow->cvars.blue_timer)));
+                                                      flow->cvars.blue_timer)));
                }
                if (flow->cvars.dropping) {
                        PUT_STAT_S32(DROP_NEXT_US,
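
The diffserv rewrite above reads the DSCP field through skb_header_pointer(), which never dirties the skb: it returns a pointer into the packet when the bytes are linear, and otherwise copies them into a caller-supplied buffer; the write path (pskb_may_pull/skb_try_make_writable) now runs only when washing actually rewrites the field. A userspace model of the helper, assuming flat buffers with a simulated linear region:

#include <string.h>
#include <stdio.h>

struct pkt {
	const unsigned char *data;
	size_t len;    /* total length */
	size_t linear; /* bytes directly addressable */
};

/* Return a pointer to pkt bytes [off, off+n); copy into buf when the
 * range isn't directly addressable. NULL if out of bounds. */
static const void *header_pointer(const struct pkt *p, size_t off,
				  size_t n, void *buf)
{
	if (off + n > p->len)
		return NULL;
	if (off + n <= p->linear)
		return p->data + off;
	memcpy(buf, p->data + off, n); /* stand-in for skb_copy_bits() */
	return buf;
}

int main(void)
{
	unsigned char raw[] = { 0x45, 0x2e, 0x00, 0x54 }; /* IPv4, ToS 0x2e */
	struct pkt p = { raw, sizeof(raw), 2 };
	unsigned char buf[2];
	const unsigned char *h = header_pointer(&p, 0, 2, buf);

	if (h)
		printf("dscp=%u\n", h[1] >> 2); /* 11 */
	return 0;
}
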
index 8f06a80..2fb76fc 100644
@@ -1075,3 +1075,4 @@ module_init(fq_module_init)
 module_exit(fq_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
index 436160b..459a784 100644
@@ -721,3 +721,4 @@ module_init(fq_codel_module_init)
 module_exit(fq_codel_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue CoDel discipline");
index be35f03..420ede8 100644
@@ -721,3 +721,4 @@ module_exit(hhf_module_exit)
 MODULE_AUTHOR("Terry Lam");
 MODULE_AUTHOR("Nandita Dukkipati");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
index 7231513..8d73546 100644
@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
                                     enum sctp_scope scope, gfp_t gfp)
 {
+       struct sock *sk = asoc->base.sk;
        int flags;
 
        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
-       flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       if (!inet_v6_ipv6only(sk))
+               flags |= SCTP_ADDR4_ALLOWED;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
        if (asoc->peer.ipv6_address)
index 53bc615..701c5a4 100644
@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                 * well as the remote peer.
                 */
                if ((((AF_INET == addr->sa.sa_family) &&
+                     (flags & SCTP_ADDR4_ALLOWED) &&
                      (flags & SCTP_ADDR4_PEERSUPP))) ||
                    (((AF_INET6 == addr->sa.sa_family) &&
                      (flags & SCTP_ADDR6_ALLOWED) &&
index 092d1af..cde29f3 100644
@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
                 * sock as well as the remote peer.
                 */
                if (addr->a.sa.sa_family == AF_INET &&
-                   !(copy_flags & SCTP_ADDR4_PEERSUPP))
+                   (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+                    !(copy_flags & SCTP_ADDR4_PEERSUPP)))
                        continue;
                if (addr->a.sa.sa_family == AF_INET6 &&
                    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
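
All three SCTP hunks hinge on one new bit: an IPv4 address is used only when the local socket allows IPv4 (i.e. is not ipv6only) and the peer supports it. The predicate in isolation, with flag names shortened from the SCTP_ADDR* constants:

#include <stdbool.h>
#include <stdio.h>

enum {
	ADDR4_ALLOWED  = 1 << 0, /* local socket is not ipv6only */
	ADDR4_PEERSUPP = 1 << 1, /* peer advertised IPv4 support */
};

/* Both bits must be set before an IPv4 address is bound or copied. */
static bool may_use_v4(unsigned int flags)
{
	return (flags & ADDR4_ALLOWED) && (flags & ADDR4_PEERSUPP);
}

int main(void)
{
	printf("%d\n", may_use_v4(ADDR4_PEERSUPP));                 /* 0: ipv6only */
	printf("%d\n", may_use_v4(ADDR4_ALLOWED | ADDR4_PEERSUPP)); /* 1 */
	return 0;
}
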
index b5d4a1e..5b9a5ab 100644
@@ -67,6 +67,30 @@ config XFRM_STATISTICS
 
          If unsure, say N.
 
+# This option selects XFRM_ALGO along with the AH authentication algorithms that
+# RFC 8221 lists as MUST be implemented.
+config XFRM_AH
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_HMAC
+       select CRYPTO_SHA256
+
+# This option selects XFRM_ALGO along with the ESP encryption and authentication
+# algorithms that RFC 8221 lists as MUST be implemented.
+config XFRM_ESP
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_AES
+       select CRYPTO_AUTHENC
+       select CRYPTO_CBC
+       select CRYPTO_ECHAINIV
+       select CRYPTO_GCM
+       select CRYPTO_HMAC
+       select CRYPTO_SEQIV
+       select CRYPTO_SHA256
+
 config XFRM_IPCOMP
        tristate
        select XFRM_ALGO
index f50d1f9..626096b 100644
@@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;
 
-       if (!xo)
+       if (!xo || (xo->flags & XFRM_XMIT))
                return skb;
 
        if (!(features & NETIF_F_HW_ESP))
@@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
+       xo->flags |= XFRM_XMIT;
+
        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;
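
validate_xmit_xfrm() can run twice on one skb, for example once from the core transmit path and again from a stacked device, so the new XFRM_XMIT bit turns the second pass into a no-op. The run-once guard as a sketch:

#include <stdbool.h>
#include <stdio.h>

#define XMIT_DONE (1u << 0) /* plays the role of the new XFRM_XMIT bit */

struct offload { unsigned int flags; };

/* True the first time only; re-entry sees the flag and backs off. */
static bool xmit_once(struct offload *xo)
{
	if (!xo || (xo->flags & XMIT_DONE))
		return false;
	xo->flags |= XMIT_DONE;
	return true;
}

int main(void)
{
	struct offload xo = { 0 };

	printf("%d\n", xmit_once(&xo)); /* 1: first pass transforms */
	printf("%d\n", xmit_once(&xo)); /* 0: second pass is a no-op */
	return 0;
}
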
 
index e4c23f6..a7ab193 100644
@@ -574,16 +574,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
        switch (x->outer_mode.family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
                break;
        case AF_INET6:
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 
-#ifdef CONFIG_NETFILTER
                IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
                break;
        }
 
index dd558cb..ef53b93 100644
@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        void *array;
-       size_t size;
 
-       size = record_size * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, record_size);
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
        int i;
 
        /* Alloc main stats_record structure */
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(*rec));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
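
The bug being fixed in these sample programs is the memset() of a buffer before its NULL check, which dereferences NULL when malloc() fails; calloc() fixes the ordering, zeroes in one step, and additionally checks the count-times-size multiplication for overflow. Side by side:

#include <stdlib.h>
#include <string.h>

/* Buggy: dereferences NULL when malloc fails. */
void *alloc_zeroed_bad(size_t n, size_t sz)
{
	void *p = malloc(n * sz); /* n * sz may also overflow silently */

	memset(p, 0, n * sz);     /* crash here if p == NULL */
	return p;
}

/* Fixed: calloc zeroes for us and rejects overflowing n * sz. */
void *alloc_zeroed_good(size_t n, size_t sz)
{
	return calloc(n, sz);     /* NULL-check stays with the caller */
}

int main(void)
{
	void *p = alloc_zeroed_good(4, 8);

	free(p);
	return 0;
}
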
index f346816..f4e755e 100644
@@ -207,11 +207,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -226,11 +223,11 @@ static struct stats_record *alloc_stats_record(void)
 
        size = sizeof(*rec) + n_cpus * sizeof(struct record);
        rec = malloc(size);
-       memset(rec, 0, size);
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
        }
+       memset(rec, 0, size);
        rec->rx_cnt.cpu    = alloc_record_per_cpu();
        rec->redir_err.cpu = alloc_record_per_cpu();
        rec->kthread.cpu   = alloc_record_per_cpu();
index 4fe4750..caa4e7f 100644
@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
 {
        unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
        struct record *array;
-       size_t size;
 
-       size = sizeof(struct record) * nr_rxqs;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_rxqs, sizeof(struct record));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
                exit(EXIT_FAIL_MEM);
@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
        struct stats_record *rec;
        int i;
 
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(struct stats_record));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
index 20b8f6c..99aec73 100644
@@ -208,8 +208,8 @@ static const struct config_entry config_table[] = {
        },
 #endif
 
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 /* Cometlake-LP */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
        {
                .flags = FLAG_SOF,
                .device = 0x02c8,
@@ -240,9 +240,7 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
-#endif
 /* Cometlake-H */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
        {
                .flags = FLAG_SOF,
                .device = 0x06c8,
index d20aedd..3565e2a 100644
@@ -2470,6 +2470,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Icelake-H */
+       { PCI_DEVICE(0x8086, 0x3dc8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Jasperlake */
        { PCI_DEVICE(0x8086, 0x38c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2478,9 +2481,14 @@ static const struct pci_device_id azx_ids[] = {
        /* Tigerlake */
        { PCI_DEVICE(0x8086, 0xa0c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake-H */
+       { PCI_DEVICE(0x8086, 0x43c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4b58),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
index fbd7cc6..e2b21ef 100644
@@ -4145,6 +4145,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP",    patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",   patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",   patch_via_hdmi),
index 6d73f8b..737ef82 100644
@@ -2461,6 +2461,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
@@ -7470,6 +7471,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
index d8f554f..e6386de 100644
@@ -342,11 +342,34 @@ static int acp3x_dma_close(struct snd_soc_component *component,
 {
        struct snd_soc_pcm_runtime *prtd;
        struct i2s_dev_data *adata;
+       struct i2s_stream_instance *ins;
 
        prtd = substream->private_data;
        component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
        adata = dev_get_drvdata(component->dev);
+       ins = substream->runtime->private_data;
+       if (!ins)
+               return -EINVAL;
 
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->play_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_play_stream = NULL;
+               }
+       } else {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->capture_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_capture_stream = NULL;
+               }
+       }
 
        /* Disable ACP irq, when the current stream is being closed and
         * another stream is also not active.
@@ -354,13 +377,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
        if (!adata->play_stream && !adata->capture_stream &&
                !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
                rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               adata->play_stream = NULL;
-               adata->i2ssp_play_stream = NULL;
-       } else {
-               adata->capture_stream = NULL;
-               adata->i2ssp_capture_stream = NULL;
-       }
        return 0;
 }
 
index de003ac..473efe9 100644
@@ -441,13 +441,13 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
        if (ret < 0) {
                dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
-               goto error;
+               goto error_pm;
        }
 
        ret = snd_hdac_regmap_init(&hcodec->core);
        if (ret < 0) {
                dev_err(&hdev->dev, "regmap init failed\n");
-               goto error;
+               goto error_pm;
        }
 
        patch = (hda_codec_patch_t)hcodec->preset->driver_data;
@@ -455,7 +455,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                ret = patch(hcodec);
                if (ret < 0) {
                        dev_err(&hdev->dev, "patch failed %d\n", ret);
-                       goto error;
+                       goto error_regmap;
                }
        } else {
                dev_dbg(&hdev->dev, "no patch file found\n");
@@ -467,7 +467,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_parse_pcms(hcodec);
        if (ret < 0) {
                dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
-               goto error;
+               goto error_regmap;
        }
 
        /* HDMI controls need to be created in machine drivers */
@@ -476,7 +476,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                if (ret < 0) {
                        dev_err(&hdev->dev, "unable to create controls %d\n",
                                ret);
-                       goto error;
+                       goto error_regmap;
                }
        }
 
@@ -496,7 +496,9 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
 
        return 0;
 
-error:
+error_regmap:
+       snd_hdac_regmap_exit(hdev);
+error_pm:
        pm_runtime_put(&hdev->dev);
 error_no_pm:
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
@@ -518,6 +520,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
 
        pm_runtime_disable(&hdev->dev);
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+       snd_hdac_regmap_exit(hdev);
 }
 
 static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
index 0d63ebf..e6613b5 100644
@@ -700,8 +700,8 @@ static bool max98390_readable_register(struct device *dev, unsigned int reg)
        case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL:
        case MAX98390_MEAS_ADC_THERM_WARN_THRESH
                ... MAX98390_BROWNOUT_INFINITE_HOLD:
-       case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0:
-       case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID:
+       case MAX98390_BROWNOUT_LVL_HOLD ... DSMIG_DEBUZZER_THRESHOLD:
+       case DSM_VOL_ENA ... MAX98390_R24FF_REV_ID:
                return true;
        default:
                return false;
@@ -717,7 +717,7 @@ static bool max98390_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98390_BROWNOUT_LOWEST_STATUS:
        case MAX98390_ENV_TRACK_BOOST_VOUT_READ:
        case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2:
-       case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+       case THERMAL_RDC_RD_BACK_BYTE1 ... DSMIG_DEBUZZER_THRESHOLD:
        case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN:
                return true;
        default:
index 67e2e94..2cccb31 100644
@@ -34,30 +34,32 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0000, 0x0000 },
        { 0x0004, 0xa000 },
        { 0x0006, 0x0003 },
-       { 0x000a, 0x0802 },
-       { 0x000c, 0x0020 },
+       { 0x000a, 0x081e },
+       { 0x000c, 0x0006 },
        { 0x000e, 0x0000 },
        { 0x0010, 0x0000 },
        { 0x0012, 0x0000 },
+       { 0x0014, 0x0000 },
+       { 0x0016, 0x0000 },
+       { 0x0018, 0x0000 },
        { 0x0020, 0x8000 },
-       { 0x0022, 0x471b },
-       { 0x006a, 0x0000 },
-       { 0x006c, 0x4020 },
+       { 0x0022, 0x8043 },
        { 0x0076, 0x0000 },
        { 0x0078, 0x0000 },
-       { 0x007a, 0x0000 },
+       { 0x007a, 0x0002 },
        { 0x007c, 0x10ec },
        { 0x007d, 0x1015 },
        { 0x00f0, 0x5000 },
-       { 0x00f2, 0x0774 },
-       { 0x00f3, 0x8400 },
+       { 0x00f2, 0x004c },
+       { 0x00f3, 0xecfe },
        { 0x00f4, 0x0000 },
+       { 0x00f6, 0x0400 },
        { 0x0100, 0x0028 },
        { 0x0102, 0xff02 },
-       { 0x0104, 0x8232 },
+       { 0x0104, 0xa213 },
        { 0x0106, 0x200c },
-       { 0x010c, 0x002f },
-       { 0x010e, 0xc000 },
+       { 0x010c, 0x0000 },
+       { 0x010e, 0x0058 },
        { 0x0111, 0x0200 },
        { 0x0112, 0x0400 },
        { 0x0114, 0x0022 },
@@ -65,38 +67,46 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0118, 0x0000 },
        { 0x011a, 0x0123 },
        { 0x011c, 0x4567 },
-       { 0x0300, 0xdddd },
-       { 0x0302, 0x0000 },
-       { 0x0311, 0x9330 },
-       { 0x0313, 0x0000 },
-       { 0x0314, 0x0000 },
+       { 0x0300, 0x203d },
+       { 0x0302, 0x001e },
+       { 0x0311, 0x0000 },
+       { 0x0313, 0x6014 },
+       { 0x0314, 0x00a2 },
        { 0x031a, 0x00a0 },
        { 0x031c, 0x001f },
        { 0x031d, 0xffff },
        { 0x031e, 0x0000 },
        { 0x031f, 0x0000 },
+       { 0x0320, 0x0000 },
        { 0x0321, 0x0000 },
-       { 0x0322, 0x0000 },
-       { 0x0328, 0x0000 },
-       { 0x0329, 0x0000 },
-       { 0x032a, 0x0000 },
-       { 0x032b, 0x0000 },
-       { 0x032c, 0x0000 },
-       { 0x032d, 0x0000 },
-       { 0x032e, 0x030e },
-       { 0x0330, 0x0080 },
+       { 0x0322, 0xd7df },
+       { 0x0328, 0x10b2 },
+       { 0x0329, 0x0175 },
+       { 0x032a, 0x36ad },
+       { 0x032b, 0x7e55 },
+       { 0x032c, 0x0520 },
+       { 0x032d, 0xaa00 },
+       { 0x032e, 0x570e },
+       { 0x0330, 0xe180 },
        { 0x0332, 0x0034 },
-       { 0x0334, 0x0000 },
-       { 0x0336, 0x0000 },
+       { 0x0334, 0x0001 },
+       { 0x0336, 0x0010 },
+       { 0x0338, 0x0000 },
+       { 0x04fa, 0x0030 },
+       { 0x04fc, 0x35c8 },
+       { 0x04fe, 0x0800 },
+       { 0x0500, 0x0400 },
+       { 0x0502, 0x1000 },
+       { 0x0504, 0x0000 },
        { 0x0506, 0x04ff },
-       { 0x0508, 0x0030 },
-       { 0x050a, 0x0018 },
-       { 0x0519, 0x307f },
-       { 0x051a, 0xffff },
-       { 0x051b, 0x4000 },
+       { 0x0508, 0x0010 },
+       { 0x050a, 0x001a },
+       { 0x0519, 0x1c68 },
+       { 0x051a, 0x0ccc },
+       { 0x051b, 0x0666 },
        { 0x051d, 0x0000 },
        { 0x051f, 0x0000 },
-       { 0x0536, 0x1000 },
+       { 0x0536, 0x061c },
        { 0x0538, 0x0000 },
        { 0x053a, 0x0000 },
        { 0x053c, 0x0000 },
@@ -110,19 +120,18 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0544, 0x0000 },
        { 0x0568, 0x0000 },
        { 0x056a, 0x0000 },
-       { 0x1000, 0x0000 },
-       { 0x1002, 0x6505 },
+       { 0x1000, 0x0040 },
+       { 0x1002, 0x5405 },
        { 0x1006, 0x5515 },
-       { 0x1007, 0x003f },
-       { 0x1009, 0x770f },
-       { 0x100a, 0x01ff },
-       { 0x100c, 0x0000 },
+       { 0x1007, 0x05f7 },
+       { 0x1009, 0x0b0a },
+       { 0x100a, 0x00ef },
        { 0x100d, 0x0003 },
        { 0x1010, 0xa433 },
        { 0x1020, 0x0000 },
-       { 0x1200, 0x3d02 },
-       { 0x1202, 0x0813 },
-       { 0x1204, 0x0211 },
+       { 0x1200, 0x5a01 },
+       { 0x1202, 0x6524 },
+       { 0x1204, 0x1f00 },
        { 0x1206, 0x0000 },
        { 0x1208, 0x0000 },
        { 0x120a, 0x0000 },
@@ -130,16 +139,16 @@ static const struct reg_default rt1015_reg[] = {
        { 0x120e, 0x0000 },
        { 0x1210, 0x0000 },
        { 0x1212, 0x0000 },
-       { 0x1300, 0x0701 },
-       { 0x1302, 0x12f9 },
-       { 0x1304, 0x3405 },
+       { 0x1300, 0x10a1 },
+       { 0x1302, 0x12ff },
+       { 0x1304, 0x0400 },
        { 0x1305, 0x0844 },
-       { 0x1306, 0x1611 },
+       { 0x1306, 0x4611 },
        { 0x1308, 0x555e },
        { 0x130a, 0x0000 },
-       { 0x130c, 0x2400},
-       { 0x130e, 0x7700 },
-       { 0x130f, 0x0000 },
+       { 0x130c, 0x2000 },
+       { 0x130e, 0x0100 },
+       { 0x130f, 0x0001 },
        { 0x1310, 0x0000 },
        { 0x1312, 0x0000 },
        { 0x1314, 0x0000 },
@@ -209,6 +218,9 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
        case RT1015_DC_CALIB_CLSD7:
        case RT1015_DC_CALIB_CLSD8:
        case RT1015_S_BST_TIMING_INTER1:
+       case RT1015_OSCK_STA:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL5:
                return true;
 
        default:
@@ -224,6 +236,12 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_CLK3:
        case RT1015_PLL1:
        case RT1015_PLL2:
+       case RT1015_DUM_RW1:
+       case RT1015_DUM_RW2:
+       case RT1015_DUM_RW3:
+       case RT1015_DUM_RW4:
+       case RT1015_DUM_RW5:
+       case RT1015_DUM_RW6:
        case RT1015_CLK_DET:
        case RT1015_SIL_DET:
        case RT1015_CUSTOMER_ID:
@@ -235,6 +253,7 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_PAD_DRV2:
        case RT1015_GAT_BOOST:
        case RT1015_PRO_ALT:
+       case RT1015_OSCK_STA:
        case RT1015_MAN_I2C:
        case RT1015_DAC1:
        case RT1015_DAC2:
@@ -272,6 +291,13 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_SMART_BST_CTRL2:
        case RT1015_ANA_CTRL1:
        case RT1015_ANA_CTRL2:
+       case RT1015_PWR_STATE_CTRL:
+       case RT1015_MONO_DYNA_CTRL:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL2:
+       case RT1015_MONO_DYNA_CTRL3:
+       case RT1015_MONO_DYNA_CTRL4:
+       case RT1015_MONO_DYNA_CTRL5:
        case RT1015_SPK_VOL:
        case RT1015_SHORT_DETTOP1:
        case RT1015_SHORT_DETTOP2:
index 6fbe802..8169962 100644
 #define RT1015_CLK3                            0x0006
 #define RT1015_PLL1                            0x000a
 #define RT1015_PLL2                            0x000c
+#define RT1015_DUM_RW1                         0x000e
+#define RT1015_DUM_RW2                         0x0010
+#define RT1015_DUM_RW3                         0x0012
+#define RT1015_DUM_RW4                         0x0014
+#define RT1015_DUM_RW5                         0x0016
+#define RT1015_DUM_RW6                         0x0018
 #define RT1015_CLK_DET                         0x0020
 #define RT1015_SIL_DET                         0x0022
 #define RT1015_CUSTOMER_ID                     0x0076
@@ -32,6 +38,7 @@
 #define RT1015_PAD_DRV2                                0x00f2
 #define RT1015_GAT_BOOST                       0x00f3
 #define RT1015_PRO_ALT                         0x00f4
+#define RT1015_OSCK_STA                                0x00f6
 #define RT1015_MAN_I2C                         0x0100
 #define RT1015_DAC1                            0x0102
 #define RT1015_DAC2                            0x0104
 #define RT1015_ANA_CTRL1                       0x0334
 #define RT1015_ANA_CTRL2                       0x0336
 #define RT1015_PWR_STATE_CTRL                  0x0338
-#define RT1015_SPK_VOL                         0x0506
+#define RT1015_MONO_DYNA_CTRL                  0x04fa
+#define RT1015_MONO_DYNA_CTRL1                 0x04fc
+#define RT1015_MONO_DYNA_CTRL2                 0x04fe
+#define RT1015_MONO_DYNA_CTRL3                 0x0500
+#define RT1015_MONO_DYNA_CTRL4                 0x0502
+#define RT1015_MONO_DYNA_CTRL5                 0x0504
+#define RT1015_SPK_VOL                                 0x0506
 #define RT1015_SHORT_DETTOP1                   0x0508
 #define RT1015_SHORT_DETTOP2                   0x050a
 #define RT1015_SPK_DC_DETECT1                  0x0519
index d324512..3e9d2c6 100644
@@ -2829,12 +2829,13 @@ static int rt5682_probe(struct snd_soc_component *component)
                                return ret;
                        }
                        rt5682->mclk = NULL;
-               } else {
-                       /* Register CCF DAI clock control */
-                       ret = rt5682_register_dai_clks(component);
-                       if (ret)
-                               return ret;
                }
+
+               /* Register CCF DAI clock control */
+               ret = rt5682_register_dai_clks(component);
+               if (ret)
+                       return ret;
+
                /* Initial setup for CCF */
                rt5682->lrck[RT5682_AIF1] = CLK_48;
 #endif
index 77665b1..7e1c13c 100644
@@ -32,6 +32,7 @@ enum asrc_pair_index {
 * @dma_chan: input and output DMA channels
  * @dma_data: private dma data
  * @pos: hardware pointer position
+ * @req_dma_chan: true if we requested the dev_to_dev channel and must release it
  * @private: pair private area
  */
 struct fsl_asrc_pair {
@@ -45,6 +46,7 @@ struct fsl_asrc_pair {
        struct dma_chan *dma_chan[2];
        struct imx_dma_data dma_data;
        unsigned int pos;
+       bool req_dma_chan;
 
        void *private;
 };
index d6a3fc5..5f01a58 100644
@@ -135,6 +135,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       struct dma_chan *tmp_chan = NULL, *be_chan = NULL;
+       struct snd_soc_component *component_be = NULL;
        struct fsl_asrc *asrc = pair->asrc;
        struct dma_slave_config config_fe, config_be;
        enum asrc_pair_index index = pair->index;
@@ -142,7 +144,6 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        int stream = substream->stream;
        struct imx_dma_data *tmp_data;
        struct snd_soc_dpcm *dpcm;
-       struct dma_chan *tmp_chan;
        struct device *dev_be;
        u8 dir = tx ? OUT : IN;
        dma_cap_mask_t mask;
@@ -198,17 +199,29 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        dma_cap_set(DMA_CYCLIC, mask);
 
        /*
+        * The Back-End device might have already requested a DMA channel,
+        * so try to reuse it first, and request a new one only if none exists.
+        */
+       component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME);
+       if (component_be) {
+               be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+               tmp_chan = be_chan;
+       }
+       if (!tmp_chan)
+               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+
+       /*
         * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
         * peripheral, unlike SDMA channel that is allocated dynamically. So no
-        * need to configure dma_request and dma_request2, but get dma_chan via
-        * dma_request_slave_channel directly with dma name of Front-End device
+        * need to configure dma_request and dma_request2, but get the dma_chan
+        * of the Back-End device directly via dma_request_slave_channel.
         */
        if (!asrc->use_edma) {
                /* Get DMA request of Back-End */
-               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
                tmp_data = tmp_chan->private;
                pair->dma_data.dma_request = tmp_data->dma_request;
-               dma_release_channel(tmp_chan);
+               if (!be_chan)
+                       dma_release_channel(tmp_chan);
 
                /* Get DMA request of Front-End */
                tmp_chan = asrc->get_dma_channel(pair, dir);
@@ -220,9 +233,11 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 
                pair->dma_chan[dir] =
                        dma_request_channel(mask, filter, &pair->dma_data);
+               pair->req_dma_chan = true;
        } else {
-               pair->dma_chan[dir] =
-                       asrc->get_dma_channel(pair, dir);
+               pair->dma_chan[dir] = tmp_chan;
+               /* Do not flag to release if we are reusing the Back-End one */
+               pair->req_dma_chan = !be_chan;
        }
 
        if (!pair->dma_chan[dir]) {
@@ -261,7 +276,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
        if (ret) {
                dev_err(dev, "failed to config DMA channel for Back-End\n");
-               dma_release_channel(pair->dma_chan[dir]);
+               if (pair->req_dma_chan)
+                       dma_release_channel(pair->dma_chan[dir]);
                return ret;
        }
 
@@ -273,19 +289,22 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       u8 dir = tx ? OUT : IN;
 
        snd_pcm_set_runtime_buffer(substream, NULL);
 
-       if (pair->dma_chan[IN])
-               dma_release_channel(pair->dma_chan[IN]);
+       if (pair->dma_chan[!dir])
+               dma_release_channel(pair->dma_chan[!dir]);
 
-       if (pair->dma_chan[OUT])
-               dma_release_channel(pair->dma_chan[OUT]);
+       /* release dev_to_dev chan if we aren't reusing the Back-End one */
+       if (pair->dma_chan[dir] && pair->req_dma_chan)
+               dma_release_channel(pair->dma_chan[dir]);
 
-       pair->dma_chan[IN] = NULL;
-       pair->dma_chan[OUT] = NULL;
+       pair->dma_chan[!dir] = NULL;
+       pair->dma_chan[dir] = NULL;
 
        return 0;
 }
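
The pair now records whether it owns its Back-End channel (req_dma_chan), so hw_free() releases only channels it requested itself and never a borrowed one. The borrowed-versus-owned cleanup idiom, sketched with an ordinary FILE handle instead of a DMA channel:

#include <stdio.h>
#include <stdbool.h>

struct sink {
	FILE *fp;
	bool owned; /* true if we fopen()'d it and must fclose() */
};

static int sink_open(struct sink *s, FILE *borrowed, const char *path)
{
	if (borrowed) {           /* reuse the caller's handle */
		s->fp = borrowed;
		s->owned = false;
		return 0;
	}
	s->fp = fopen(path, "w"); /* acquire our own */
	s->owned = true;
	return s->fp ? 0 : -1;
}

static void sink_close(struct sink *s)
{
	if (s->fp && s->owned)    /* never close a borrowed handle */
		fclose(s->fp);
	s->fp = NULL;
}

int main(void)
{
	struct sink s;

	sink_open(&s, stdout, NULL); /* borrowed: must stay open */
	fprintf(s.fp, "hello\n");
	sink_close(&s);              /* leaves stdout untouched */
	return 0;
}
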
index bad89b0..1a2fa7f 100644
@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        struct regmap *regs = ssi->regs;
        u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
        unsigned long clkrate, baudrate, tmprate;
-       unsigned int slots = params_channels(hw_params);
-       unsigned int slot_width = 32;
+       unsigned int channels = params_channels(hw_params);
+       unsigned int slot_width = params_width(hw_params);
+       unsigned int slots = 2;
        u64 sub, savesub = 100000;
        unsigned int freq;
        bool baudclk_is_used;
@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        /* Override slots and slot_width if being specifically set... */
        if (ssi->slots)
                slots = ssi->slots;
-       /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
-       if (ssi->slot_width && slots != 2)
+       if (ssi->slot_width)
                slot_width = ssi->slot_width;
 
+       /* ...but force 32 bits for stereo audio using I2S Master Mode */
+       if (channels == 2 &&
+           (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+               slot_width = 32;
+
        /* Generate bit clock based on the slot number and slot width */
        freq = slots * slot_width * params_rate(hw_params);
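
The bit clock is slots times slot_width times sample rate; the fix takes slot_width from the hw_params word size and forces 32-bit slots only for two-channel I2S master mode. Worked numbers as a sketch:

#include <stdio.h>

static unsigned int bclk_hz(unsigned int slots, unsigned int slot_width,
			    unsigned int rate)
{
	return slots * slot_width * rate;
}

int main(void)
{
	/* stereo I2S master, forced 32-bit slots: 2 * 32 * 48000 */
	printf("%u\n", bclk_hz(2, 32, 48000)); /* 3072000 */
	/* 16-bit slots, as now allowed outside I2S master mode */
	printf("%u\n", bclk_hz(2, 16, 48000)); /* 1536000 */
	return 0;
}
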
 
index a2a5798..5dc489a 100644
@@ -492,7 +492,7 @@ config SND_SOC_INTEL_SOF_PCM512x_MACH
 
 endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
 
-if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
+if (SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK)
 
 config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
        tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
@@ -520,7 +520,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
          Say Y if you have such a device.
          If unsure select "N".
 
-endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
+endif ## SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK
 
 if SND_SOC_SOF_JASPERLAKE
 
index 6c20bdd..8ada4ec 100644
@@ -4,6 +4,7 @@
 
 #include <linux/module.h>
 #include "common.h"
+#include "qdsp6/q6afe.h"
 
 int qcom_snd_parse_of(struct snd_soc_card *card)
 {
@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        }
                        link->no_pcm = 1;
                        link->ignore_pmdown_time = 1;
+
+                       if (q6afe_is_rx_port(link->id)) {
+                               link->dpcm_playback = 1;
+                               link->dpcm_capture = 0;
+                       } else {
+                               link->dpcm_playback = 0;
+                               link->dpcm_capture = 1;
+                       }
+
                } else {
                        dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
                        if (!dlc)
@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        link->codecs->dai_name = "snd-soc-dummy-dai";
                        link->codecs->name = "snd-soc-dummy";
                        link->dynamic = 1;
+                       link->dpcm_playback = 1;
+                       link->dpcm_capture = 1;
                }
 
                link->ignore_suspend = 1;
                link->nonatomic = 1;
-               link->dpcm_playback = 1;
-               link->dpcm_capture = 1;
                link->stream_name = link->name;
                link++;
 
index e0945f7..0ce4eb6 100644
@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
 }
 EXPORT_SYMBOL_GPL(q6afe_get_port_id);
 
+int q6afe_is_rx_port(int index)
+{
+       if (index < 0 || index >= AFE_PORT_MAX)
+               return -EINVAL;
+
+       return port_maps[index].is_rx;
+}
+EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
 static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
                            struct q6afe_port *port)
 {
index c7ed542..1a0f80a 100644
@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
 int q6afe_port_stop(struct q6afe_port *port);
 void q6afe_port_put(struct q6afe_port *port);
 int q6afe_get_port_id(int index);
+int q6afe_is_rx_port(int index);
 void q6afe_hdmi_port_prepare(struct q6afe_port *port,
                            struct q6afe_hdmi_cfg *cfg);
 void q6afe_slim_port_prepare(struct q6afe_port *port,
index 0e0e8f7..ae4b2ca 100644
@@ -25,6 +25,7 @@
 #define ASM_STREAM_CMD_FLUSH                   0x00010BCE
 #define ASM_SESSION_CMD_PAUSE                  0x00010BD3
 #define ASM_DATA_CMD_EOS                       0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS            0x00010C1C
 #define ASM_NULL_POPP_TOPOLOGY                 0x00010C68
 #define ASM_STREAM_CMD_FLUSH_READBUFS          0x00010C09
 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM                0x00010C10
@@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                case ASM_SESSION_CMD_SUSPEND:
                        client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
                        break;
-               case ASM_DATA_CMD_EOS:
-                       client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
-                       break;
                case ASM_STREAM_CMD_FLUSH:
                        client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
                        break;
@@ -728,6 +726,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                }
 
                break;
+       case ASM_DATA_EVENT_RENDERED_EOS:
+               client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+               break;
        }
 
        if (ac->cb)
index 7cd42fc..1707414 100644 (file)
@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put(dev);
                return ret;
+       }
 
        ret = regcache_sync(pdm->regmap);
 
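
The shape of this fix is the usual pm_runtime_get_sync() idiom: the usage count
is raised even when the resume fails, so the error path has to drop it by hand.
Later kernels (v5.10+) added pm_runtime_resume_and_get(), which drops the
reference on failure itself; a sketch of the same resume path on top of that
helper (struct and field names assumed from the driver):

    static int rockchip_pdm_resume(struct device *dev)
    {
            struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
            int ret;

            /* takes the usage count only on success: no put on error */
            ret = pm_runtime_resume_and_get(dev);
            if (ret < 0)
                    return ret;

            return regcache_sync(pdm->regmap);
    }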
index 7b38720..0f30f5a 100644 (file)
@@ -310,7 +310,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
 }
 EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
 
-static struct snd_soc_component
+struct snd_soc_component
 *snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
 {
        struct snd_soc_component *component;
@@ -329,6 +329,7 @@ static struct snd_soc_component
 
        return found_component;
 }
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked);
 
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name)
index a9ea172..11e5d79 100644 (file)
@@ -9,6 +9,43 @@
 #include <sound/soc.h>
 #include <sound/dmaengine_pcm.h>
 
+static void devm_dai_release(struct device *dev, void *res)
+{
+       snd_soc_unregister_dai(*(struct snd_soc_dai **)res);
+}
+
+/**
+ * devm_snd_soc_register_dai - resource-managed dai registration
+ * @dev: Device used to manage component
+ * @component: The component the DAIs are registered for
+ * @dai_drv: DAI driver to use for the DAI
+ * @legacy_dai_naming: if %true, use legacy single-name format;
+ *     if %false, use multiple-name format.
+ */
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming)
+{
+       struct snd_soc_dai **ptr;
+       struct snd_soc_dai *dai;
+
+       ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming);
+       if (dai) {
+               *ptr = dai;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return dai;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai);
+
 static void devm_component_release(struct device *dev, void *res)
 {
        snd_soc_unregister_component(*(struct device **)res);
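
How a caller consumes the managed variant (the soc-topology hunk further down is
the first user): the DAI's lifetime is tied to dev, so error paths no longer
need a matching snd_soc_unregister_dai(). A minimal sketch, every name except
devm_snd_soc_register_dai() illustrative:

    struct snd_soc_dai *dai;

    dai = devm_snd_soc_register_dai(dev, component, dai_drv, false);
    if (!dai)
            return -ENOMEM; /* DAI registration or devres allocation failed */

    /* no explicit cleanup: devm_dai_release() calls
     * snd_soc_unregister_dai() when dev is unbound */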
index f728309..80a4e71 100644 (file)
  */
 #define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
 
-struct dmaengine_pcm {
-       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
-       const struct snd_dmaengine_pcm_config *config;
-       struct snd_soc_component component;
-       unsigned int flags;
-};
-
-static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
-{
-       return container_of(p, struct dmaengine_pcm, component);
-}
-
 static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
        struct snd_pcm_substream *substream)
 {
index 2c114b4..c517064 100644 (file)
@@ -2630,15 +2630,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
        int count, paths;
        int ret;
 
+       if (!fe->dai_link->dynamic)
+               return 0;
+
        if (fe->num_cpus > 1) {
                dev_err(fe->dev,
                        "%s doesn't support Multi CPU yet\n", __func__);
                return -EINVAL;
        }
 
-       if (!fe->dai_link->dynamic)
-               return 0;
-
        /* only check active links */
        if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
                return 0;
index 9e89633..43e5745 100644 (file)
@@ -1851,7 +1851,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
 
        /* register the DAI to the component */
-       dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+       dai = devm_snd_soc_register_dai(tplg->comp->dev, tplg->comp, dai_drv, false);
        if (!dai)
                return -ENOMEM;
 
@@ -1859,7 +1859,6 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
        if (ret != 0) {
                dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
-               snd_soc_unregister_dai(dai);
                return ret;
        }
 
index c9a2bee..3aaf25e 100644 (file)
@@ -25,8 +25,7 @@ config SND_SOC_SOF_INTEL_PCI
        select SND_SOC_SOF_CANNONLAKE  if SND_SOC_SOF_CANNONLAKE_SUPPORT
        select SND_SOC_SOF_COFFEELAKE  if SND_SOC_SOF_COFFEELAKE_SUPPORT
        select SND_SOC_SOF_ICELAKE     if SND_SOC_SOF_ICELAKE_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_LP if SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
+       select SND_SOC_SOF_COMETLAKE   if SND_SOC_SOF_COMETLAKE_SUPPORT
        select SND_SOC_SOF_TIGERLAKE   if SND_SOC_SOF_TIGERLAKE_SUPPORT
        select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
        select SND_SOC_SOF_JASPERLAKE  if SND_SOC_SOF_JASPERLAKE_SUPPORT
@@ -201,34 +200,22 @@ config SND_SOC_SOF_ICELAKE
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP
+config SND_SOC_SOF_COMETLAKE
        tristate
        select SND_SOC_SOF_HDA_COMMON
        help
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       bool "SOF support for CometLake-LP"
-       help
-         This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-LP processors.
-         Say Y if you have such a device.
-         If unsure select "N".
+config SND_SOC_SOF_COMETLAKE_SUPPORT
+       bool
 
-config SND_SOC_SOF_COMETLAKE_H
-       tristate
-       select SND_SOC_SOF_HDA_COMMON
-       help
-         This option is not user-selectable but automagically handled by
-         'select' statements at a higher level
-
-config SND_SOC_SOF_COMETLAKE_H_SUPPORT
-       bool "SOF support for CometLake-H"
+config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
+       bool "SOF support for CometLake"
+       select SND_SOC_SOF_COMETLAKE_SUPPORT
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-H processors.
-         Say Y if you have such a device.
+         using the Cometlake processors.
          If unsure select "N".
 
 config SND_SOC_SOF_TIGERLAKE_SUPPORT
index 7f65dcc..1bda14c 100644 (file)
@@ -653,11 +653,16 @@ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
                if (status & AZX_INT_CTRL_EN) {
                        rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
                        if (rirb_status & RIRB_INT_MASK) {
+                               /*
+                                * Clearing the interrupt status here ensures
+                                * that no interrupt gets masked after the RIRB
+                                * wp is read in snd_hdac_bus_update_rirb.
+                                */
+                               snd_hdac_chip_writeb(bus, RIRBSTS,
+                                                    RIRB_INT_MASK);
                                active = true;
                                if (rirb_status & RIRB_INT_RESPONSE)
                                        snd_hdac_bus_update_rirb(bus);
-                               snd_hdac_chip_writeb(bus, RIRBSTS,
-                                                    RIRB_INT_MASK);
                        }
                }
 #endif
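
The swap above is purely an ordering fix; a condensed view of why the ack must
precede the write-pointer read (hand sketch, not from the patch text):

    /* fixed order                        race being closed
     *
     * 1. ack:  write RIRBSTS             1. read RIRB wp
     * 2. read RIRB wp                    2.    <- response arrives here
     * 3. consume entries up to wp        3. ack: write RIRBSTS
     *
     * a response landing after step 1    the late ack clears the interrupt
     * re-raises the interrupt and is     for a response that was never
     * consumed on the next pass          consumed, so it can be lost
     */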
index b13697d..aa3532b 100644 (file)
@@ -151,9 +151,7 @@ static const struct sof_dev_desc cfl_desc = {
 };
 #endif
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) || \
-       IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 static const struct sof_dev_desc cml_desc = {
        .machines               = snd_soc_acpi_intel_cml_machines,
        .alt_machines           = snd_soc_acpi_intel_cml_sdw_machines,
@@ -411,8 +409,11 @@ static const struct pci_device_id sof_pci_ids[] = {
                .driver_data = (unsigned long)&cfl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
-       { PCI_DEVICE(0x8086, 0x34C8),
+       { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+               .driver_data = (unsigned long)&icl_desc},
+       { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
                .driver_data = (unsigned long)&icl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
        { PCI_DEVICE(0x8086, 0x38c8),
@@ -420,17 +421,20 @@ static const struct pci_device_id sof_pci_ids[] = {
        { PCI_DEVICE(0x8086, 0x4dc8),
                .driver_data = (unsigned long)&jsl_desc},
 #endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
-       { PCI_DEVICE(0x8086, 0x02c8),
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+       { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
                .driver_data = (unsigned long)&cml_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-       { PCI_DEVICE(0x8086, 0x06c8),
+       { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+               .driver_data = (unsigned long)&cml_desc},
+       { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
                .driver_data = (unsigned long)&cml_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
-       { PCI_DEVICE(0x8086, 0xa0c8),
+       { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+               .driver_data = (unsigned long)&tgl_desc},
+       { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
                .driver_data = (unsigned long)&tgl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
        { PCI_DEVICE(0x8086, 0x4b55),
index 5ffb457..1b28d01 100644 (file)
@@ -394,8 +394,9 @@ skip_rate:
        return nr_rates;
 }
 
-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
- * call. Return a static table of known clock rates.
+/* Line6 Helix series and the Rode Rodecaster Pro don't support the
+ * UAC2_CS_RANGE usb function call. Return a static table of known
+ * clock rates.
  */
 static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
                                                struct audioformat *fp)
@@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
        case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
        case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
        case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+       case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
                return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
        }
 
index 15769f2..eab0fd4 100644 (file)
@@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
  * if failed, give up and free the control instance.
  */
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info)
 {
        struct usb_mixer_interface *mixer = list->mixer;
        int err;
@@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
                return err;
        }
        list->kctl = kctl;
+       list->is_std_info = is_std_info;
        list->next_id_elem = mixer->id_elems[list->id];
        mixer->id_elems[list->id] = list;
        return 0;
@@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
        unitid = delegate_notify(mixer, unitid, NULL, NULL);
 
        for_each_mixer_elem(list, mixer, unitid) {
-               struct usb_mixer_elem_info *info =
-                       mixer_elem_list_to_info(list);
+               struct usb_mixer_elem_info *info;
+
+               if (!list->is_std_info)
+                       continue;
+               info = mixer_elem_list_to_info(list);
                /* invalidate cache, so the value is read from the device */
                info->cached = 0;
                snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
@@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
 
                if (!list->kctl)
                        continue;
+               if (!list->is_std_info)
+                       continue;
 
                info = mixer_elem_list_to_info(list);
                if (count > 1 && info->control != control)
index 41ec9dc..c29e27a 100644 (file)
@@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
        struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
        struct snd_kcontrol *kctl;
        unsigned int id;
+       bool is_std_info;
        usb_mixer_elem_dump_func_t dump;
        usb_mixer_elem_resume_func_t resume;
 };
@@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
 int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
                                int request, int validx, int value_set);
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+       snd_usb_mixer_add_list(list, kctl, true)
 
 void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
                                 struct usb_mixer_interface *mixer,
index b6bcf2f..cec1cfd 100644 (file)
@@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
        kctl->private_free = snd_usb_mixer_elem_free;
-       return snd_usb_mixer_add_control(list, kctl);
+       /* don't use snd_usb_mixer_add_control() here, this is a special list element */
+       return snd_usb_mixer_add_list(list, kctl, false);
 }
 
 /*
index 8a05dcb..a777d36 100644 (file)
@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
        case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+       case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
@@ -1786,6 +1787,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
                return 0;
        case SNDRV_PCM_TRIGGER_STOP:
                stop_endpoints(subs);
+               subs->data_endpoint->retire_data_urb = NULL;
                subs->running = 0;
                return 0;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
index bca0179..fca7273 100644 (file)
@@ -1532,6 +1532,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 static bool is_itf_usb_dsd_dac(unsigned int id)
 {
        switch (id) {
+       case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
        case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1673,6 +1674,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
             chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                usleep_range(1000, 2000);
+
+       /*
+        * Samsung USBC Headset (AKG) needs a tiny delay after each
+        * class-compliant request. (Model number: AAM625R or AAM627R)
+        */
+       if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               usleep_range(5000, 6000);
 }
 
 /*
@@ -1856,6 +1865,7 @@ struct registration_quirk {
 static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),     /* Kingston HyperX AMP */
        REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),     /* Kingston HyperX Cloud Alpha S */
+       REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        { 0 }                                   /* terminator */
 };
 
index 3110164..70c78fa 100644 (file)
@@ -49,7 +49,7 @@ MAP COMMANDS
 |              | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
 |              | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
 |              | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-|              | **queue** | **stack** | **sk_storage** | **struct_ops** }
+|              | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** }
 
 DESCRIPTION
 ===========
index c5fac80..1d3b606 100644 (file)
@@ -49,6 +49,7 @@ const char * const map_type_name[] = {
        [BPF_MAP_TYPE_STACK]                    = "stack",
        [BPF_MAP_TYPE_SK_STORAGE]               = "sk_storage",
        [BPF_MAP_TYPE_STRUCT_OPS]               = "struct_ops",
+       [BPF_MAP_TYPE_RINGBUF]                  = "ringbuf",
 };
 
 const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -1590,7 +1591,7 @@ static int do_help(int argc, char **argv)
                "                 lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
                "                 devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
                "                 cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
-               "                 queue | stack | sk_storage | struct_ops }\n"
+               "                 queue | stack | sk_storage | struct_ops | ringbuf }\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
                bin_name, argv[-2]);
index 1968481..974a713 100644 (file)
@@ -3168,7 +3168,7 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
  *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
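
With the return type corrected to int, the helper can be error-checked like any
other. A minimal self-contained user (map name, section, and payload are
hypothetical; a libbpf-style build is assumed):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);      /* page-aligned power of two */
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_write")
    int log_write(void *ctx)
    {
            __u32 pid = bpf_get_current_pid_tgid() >> 32;

            /* 0 on success, a negative error when the buffer is full or
             * the sample does not fit */
            if (bpf_ringbuf_output(&rb, &pid, sizeof(pid), 0))
                    return 0;       /* sample dropped */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";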
index eec23fa..83844f8 100644 (file)
@@ -47,7 +47,7 @@ static int transfer_size;
 static int iterations;
 static int interval = 5; /* interval in seconds for showing transfer rate */
 
-uint8_t default_tx[] = {
+static uint8_t default_tx[] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -56,8 +56,8 @@ uint8_t default_tx[] = {
        0xF0, 0x0D,
 };
 
-uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
-char *input_tx;
+static uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+static char *input_tx;
 
 static void hex_dump(const void *src, size_t length, size_t line_size,
                     char *prefix)
@@ -461,8 +461,8 @@ int main(int argc, char *argv[])
                pabort("can't get max speed hz");
 
        printf("spi mode: 0x%x\n", mode);
-       printf("bits per word: %d\n", bits);
-       printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
+       printf("bits per word: %u\n", bits);
+       printf("max speed: %u Hz (%u kHz)\n", speed, speed/1000);
 
        if (input_tx)
                transfer_escaped_string(fd, input_tx);
index 2061a6b..5f54c6a 100644 (file)
@@ -13,6 +13,7 @@ static int getsetsockopt(void)
                char cc[16]; /* TCP_CA_NAME_MAX */
        } buf = {};
        socklen_t optlen;
+       char *big_buf = NULL;
 
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
@@ -22,24 +23,31 @@ static int getsetsockopt(void)
 
        /* IP_TOS - BPF bypass */
 
-       buf.u8[0] = 0x08;
-       err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1);
+       optlen = getpagesize() * 2;
+       big_buf = calloc(1, optlen);
+       if (!big_buf) {
+               log_err("Couldn't allocate two pages");
+               goto err;
+       }
+
+       *(int *)big_buf = 0x08;
+       err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen);
        if (err) {
                log_err("Failed to call setsockopt(IP_TOS)");
                goto err;
        }
 
-       buf.u8[0] = 0x00;
+       memset(big_buf, 0, optlen);
        optlen = 1;
-       err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen);
+       err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen);
        if (err) {
                log_err("Failed to call getsockopt(IP_TOS)");
                goto err;
        }
 
-       if (buf.u8[0] != 0x08) {
-               log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08",
-                       buf.u8[0]);
+       if (*(int *)big_buf != 0x08) {
+               log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
+                       *(int *)big_buf);
                goto err;
        }
 
@@ -78,6 +86,28 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       /* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */
+
+       optlen = getpagesize() * 2;
+       memset(big_buf, 0, optlen);
+
+       err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen);
+       if (err != 0) {
+               log_err("Failed to call setsockopt, ret=%d", err);
+               goto err;
+       }
+
+       err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen);
+       if (err != 0) {
+               log_err("Failed to call getsockopt, ret=%d", err);
+               goto err;
+       }
+
+       if (optlen != 1 || *(__u8 *)big_buf != 0x55) {
+               log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x",
+                       optlen, *(__u8 *)big_buf);
+               goto err;
+       }
+
        /* SO_SNDBUF is overwritten */
 
        buf.u32 = 0x01010101;
@@ -124,9 +154,11 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       free(big_buf);
        close(fd);
        return 0;
 err:
+       free(big_buf);
        close(fd);
        return -1;
 }
index 7897c8f..ef57408 100644 (file)
@@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* track the minimum delay over the first HYSTART_MIN_SAMPLES packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
index d5a5eeb..712df7b 100644 (file)
@@ -8,6 +8,10 @@
 char _license[] SEC("license") = "GPL";
 __u32 _version SEC("version") = 1;
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 #define SOL_CUSTOM                     0xdeadbeef
 
 struct sockopt_sk {
@@ -28,12 +32,14 @@ int _getsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Not interested in SOL_SOCKET:SO_SNDBUF;
@@ -51,6 +57,26 @@ int _getsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               ctx->retval = 0; /* Reset system call return value to zero */
+
+               /* Always export 0x55 */
+               optval[0] = 0x55;
+               ctx->optlen = 1;
+
+               /* Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
 
@@ -81,12 +107,14 @@ int _setsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Overwrite SO_SNDBUF value */
@@ -112,6 +140,28 @@ int _setsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               /* Original optlen is larger than PAGE_SIZE. */
+               if (ctx->optlen != PAGE_SIZE * 2)
+                       return 0; /* EPERM, unexpected data size */
+
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               /* Make sure we can trim the buffer. */
+               optval[0] = 0;
+               ctx->optlen = 1;
+
+                * Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
 
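
The two IP_FREEBIND branches encode the contract this test exercises: the
program only ever sees the first PAGE_SIZE bytes of optval, and a program that
is not interested in an option should set ctx->optlen = 0 so the kernel passes
the (possibly oversized) buffer through untouched. Distilled to the minimal
handler shape (sketch; semantics as exercised above):

    __u8 *optval = ctx->optval, *optval_end = ctx->optval_end;

    if (ctx->level != SOL_IP || ctx->optname != IP_FREEBIND) {
            ctx->optlen = 0;        /* pass through, even if > PAGE_SIZE */
            return 1;
    }

    if (optval + 1 > optval_end)    /* verifier-mandated bounds check */
            return 0;               /* EPERM */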
index 383bac0..ceaad78 100644 (file)
@@ -15,8 +15,9 @@
 #include <inttypes.h>
 #include <linux/net_tstamp.h>
 #include <linux/errqueue.h>
+#include <linux/if_ether.h>
 #include <linux/ipv6.h>
-#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
 {
        char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
                     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
-       char data[sizeof(struct ipv6hdr) +
-                 sizeof(struct tcphdr) + 1];
+       char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+                 sizeof(struct udphdr) + 1];
        struct sock_extended_err *err;
        struct msghdr msg = {0};
        struct iovec iov = {0};
@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
        msg.msg_controllen = sizeof(control);
 
        while (1) {
+               const char *reason;
+
                ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
                if (ret == -1 && errno == EAGAIN)
                        break;
@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
                err = (struct sock_extended_err *)CMSG_DATA(cm);
                if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
                        error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
-               if (err->ee_code != ECANCELED)
-                       error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
+
+               switch (err->ee_errno) {
+               case ECANCELED:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
+                               error(1, 0, "errqueue: unknown ECANCELED %u\n",
+                                     err->ee_code);
+                       reason = "missed txtime";
+                       break;
+               case EINVAL:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
+                               error(1, 0, "errqueue: unknown EINVAL %u\n",
+                                     err->ee_code);
+                       reason = "invalid txtime";
+                       break;
+               default:
+                       error(1, 0, "errqueue: errno %u code %u\n",
+                             err->ee_errno, err->ee_code);
+               }
 
                tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
                tstamp -= (int64_t) glob_tstart;
                tstamp /= 1000 * 1000;
-               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
-                               data[ret - 1], tstamp);
+               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
+                       data[ret - 1], tstamp, reason);
 
                msg.msg_flags = 0;
                msg.msg_controllen = sizeof(control);
index 9c0f758..a179f0d 100644 (file)
@@ -3,7 +3,7 @@
 
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
-       nft_concat_range.sh \
+       nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh
 
 LDLIBS = -lmnl
diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
new file mode 100755 (executable)
index 0000000..edf0a48
--- /dev/null
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# This tests connection tracking helper assignment:
+# 1. the ftp helper can be attached to a connection from an nft ruleset.
+# 2. auto-assignment still works.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+testipv6=1
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without conntrack tool"
+       exit $ksft_skip
+fi
+
+which nc >/dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without netcat tool"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${ns1} type veth peer name veth0 netns ${ns2} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set veth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set veth0 up
+
+ip -net ${ns1} addr add 10.0.1.1/24 dev veth0
+ip -net ${ns1} addr add dead:1::1/64 dev veth0
+
+ip -net ${ns2} addr add 10.0.1.2/24 dev veth0
+ip -net ${ns2} addr add dead:1::2/64 dev veth0
+
+load_ruleset_family() {
+       local family=$1
+       local ns=$2
+
+ip netns exec ${ns} nft -f - <<EOF
+table $family raw {
+       ct helper ftp {
+             type "ftp" protocol tcp
+        }
+       chain pre {
+               type filter hook prerouting priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+}
+EOF
+       return $?
+}
+
+check_for_helper()
+{
+       local netns=$1
+       local message=$2
+       local port=$3
+
+       ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null | grep -q 'helper=ftp'
+       if [ $? -ne 0 ] ; then
+               echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+               ret=1
+               return 1
+       fi
+
+       echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+       return 0
+}
+
+test_helper()
+{
+       local port=$1
+       local msg=$2
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ip $msg" $port
+       check_for_helper "$ns2" "ip $msg" $port
+
+       wait
+
+       if [ $testipv6 -eq 0 ] ;then
+               return 0
+       fi
+
+       ip netns exec ${ns1} conntrack -F 2> /dev/null
+       ip netns exec ${ns2} conntrack -F 2> /dev/null
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ipv6 $msg" $port
+       check_for_helper "$ns2" "ipv6 $msg" $port
+
+       wait
+}
+
+load_ruleset_family ip ${ns1}
+if [ $? -ne 0 ];then
+       echo "FAIL: ${ns1} cannot load ip ruleset" 1>&2
+       exit 1
+fi
+
+load_ruleset_family ip6 ${ns1}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns1} cannot load ip6 ruleset" 1>&2
+       testipv6=0
+fi
+
+load_ruleset_family inet ${ns2}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns2} cannot load inet ruleset" 1>&2
+       load_ruleset_family ip ${ns2}
+       if [ $? -ne 0 ];then
+               echo "FAIL: ${ns2} cannot load ip ruleset" 1>&2
+               exit 1
+       fi
+
+       if [ $testipv6 -eq 1 ] ;then
+               load_ruleset_family ip6 ${ns2}
+               if [ $? -ne 0 ];then
+                       echo "FAIL: ${ns2} cannot load ip6 ruleset" 1>&2
+                       exit 1
+               fi
+       fi
+fi
+
+test_helper 2121 "set via ruleset"
+ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 "auto-assign"
+
+exit $ret
index c1921a5..8d728ed 100644 (file)
@@ -95,4 +95,9 @@ static inline int sys_pidfd_getfd(int pidfd, int fd, int flags)
        return syscall(__NR_pidfd_getfd, pidfd, fd, flags);
 }
 
+static inline int sys_memfd_create(const char *name, unsigned int flags)
+{
+       return syscall(__NR_memfd_create, name, flags);
+}
+
 #endif /* __PIDFD_H */
index 401a7c1..84b65ec 100644 (file)
@@ -34,11 +34,6 @@ static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1,
        return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
 }
 
-static int sys_memfd_create(const char *name, unsigned int flags)
-{
-       return syscall(__NR_memfd_create, name, flags);
-}
-
 static int __child(int sk, int memfd)
 {
        int ret;
index 133ec5b..9418108 100644 (file)
@@ -470,4 +470,16 @@ TEST_F(current_nsset, no_foul_play)
        }
 }
 
+TEST(setns_einval)
+{
+       int fd;
+
+       fd = sys_memfd_create("rostock", 0);
+       EXPECT_GT(fd, 0);
+
+       ASSERT_NE(setns(fd, 0), 0);
+       EXPECT_EQ(errno, EINVAL);
+       close(fd);
+}
+
 TEST_HARNESS_MAIN
index 47a3082..503982b 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 12345",
+        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 123456",
         "expExitCode": "255",
         "verifyCmd": "$TC action ls action bpf",
-        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 12345",
+        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 123456",
         "matchCount": "0",
         "teardown": [
             "$TC action flush action bpf"
index 88ec134..072febf 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action csum",
         "matchPattern": "^[ \t]+index [0-9]* ref",
                 1,
                 255
             ],
-            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
+            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
         ],
         "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
         "expExitCode": "0",
index fbeb919..d063469 100644 (file)
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 824212:80:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:4224:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288428822.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
                 1,
                 255
             ],
-            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie 123456"
         ],
-        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie 123456",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie 123456",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
index 17a1f53..d77f482 100755 (executable)
@@ -587,9 +587,20 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
 declare -A objects
 while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
-       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
        objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
 done < /dev/kmsg
 alldeleted=1
index 6683b4a..caab980 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <errno.h>
@@ -135,10 +136,4 @@ static inline void free_page(unsigned long addr)
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 #endif /* KERNEL_H */
index b751350..5d90254 100644 (file)
@@ -11,12 +11,11 @@ struct device {
 struct virtio_device {
        struct device dev;
        u64 features;
+       struct list_head vqs;
 };
 
 struct virtqueue {
-       /* TODO: commented as list macros are empty stubs for now.
-        * Broken but enough for virtio_ring.c
-        * struct list_head list; */
+       struct list_head list;
        void (*callback)(struct virtqueue *vq);
        const char *name;
        struct virtio_device *vdev;
index b427def..cb3f29c 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <getopt.h>
+#include <limits.h>
 #include <string.h>
 #include <poll.h>
 #include <sys/eventfd.h>
@@ -18,6 +19,8 @@
 #include <linux/virtio_ring.h>
 #include "../../drivers/vhost/test.h"
 
+#define RANDOM_BATCH -1
+
 /* Unused */
 void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
 
@@ -43,6 +46,10 @@ struct vdev_info {
        struct vhost_memory *mem;
 };
 
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+                                    backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
+
 bool vq_notify(struct virtqueue *vq)
 {
        struct vq_info *info = vq->priv;
@@ -88,6 +95,19 @@ void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
        assert(r >= 0);
 }
 
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+       if (info->vq)
+               vring_del_virtqueue(info->vq);
+
+       memset(info->ring, 0, vring_size(num, 4096));
+       vring_init(&info->vring, num, info->ring, 4096);
+       info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
+                                        false, vq_notify, vq_callback, "test");
+       assert(info->vq);
+       info->vq->priv = info;
+}
+
 static void vq_info_add(struct vdev_info *dev, int num)
 {
        struct vq_info *info = &dev->vqs[dev->nvqs];
@@ -97,14 +117,7 @@ static void vq_info_add(struct vdev_info *dev, int num)
        info->call = eventfd(0, EFD_NONBLOCK);
        r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
        assert(r >= 0);
-       memset(info->ring, 0, vring_size(num, 4096));
-       vring_init(&info->vring, num, info->ring, 4096);
-       info->vq = vring_new_virtqueue(info->idx,
-                                      info->vring.num, 4096, &dev->vdev,
-                                      true, false, info->ring,
-                                      vq_notify, vq_callback, "test");
-       assert(info->vq);
-       info->vq->priv = info;
+       vq_reset(info, num, &dev->vdev);
        vhost_vq_setup(dev, info);
        dev->fds[info->idx].fd = info->call;
        dev->fds[info->idx].events = POLLIN;
@@ -116,6 +129,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
        int r;
        memset(dev, 0, sizeof *dev);
        dev->vdev.features = features;
+       INIT_LIST_HEAD(&dev->vdev.vqs);
        dev->buf_size = 1024;
        dev->buf = malloc(dev->buf_size);
        assert(dev->buf);
@@ -152,41 +166,93 @@ static void wait_for_interrupt(struct vdev_info *dev)
 }
 
 static void run_test(struct vdev_info *dev, struct vq_info *vq,
-                    bool delayed, int bufs)
+                    bool delayed, int batch, int reset_n, int bufs)
 {
        struct scatterlist sl;
-       long started = 0, completed = 0;
-       long completed_before;
+       long started = 0, completed = 0, next_reset = reset_n;
+       long completed_before, started_before;
        int r, test = 1;
        unsigned len;
        long long spurious = 0;
+       const bool random_batch = batch == RANDOM_BATCH;
+
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
+       if (!reset_n) {
+               next_reset = INT_MAX;
+       }
+
        for (;;) {
                virtqueue_disable_cb(vq->vq);
                completed_before = completed;
+               started_before = started;
                do {
-                       if (started < bufs) {
+                       const bool reset = completed > next_reset;
+                       if (random_batch)
+                               batch = (random() % vq->vring.num) + 1;
+
+                       while (started < bufs &&
+                              (started - completed) < batch) {
                                sg_init_one(&sl, dev->buf, dev->buf_size);
                                r = virtqueue_add_outbuf(vq->vq, &sl, 1,
                                                         dev->buf + started,
                                                         GFP_ATOMIC);
-                               if (likely(r == 0)) {
-                                       ++started;
-                                       if (unlikely(!virtqueue_kick(vq->vq)))
+                               if (unlikely(r != 0)) {
+                                       if (r == -ENOSPC &&
+                                           started > started_before)
+                                               r = 0;
+                                       else
                                                r = -1;
+                                       break;
                                }
-                       } else
+
+                               ++started;
+
+                               if (unlikely(!virtqueue_kick(vq->vq))) {
+                                       r = -1;
+                                       break;
+                               }
+                       }
+
+                       if (started >= bufs)
                                r = -1;
 
+                       if (reset) {
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &no_backend);
+                               assert(!r);
+                       }
+
                        /* Flush out completed bufs if any */
-                       if (virtqueue_get_buf(vq->vq, &len)) {
+                       while (virtqueue_get_buf(vq->vq, &len)) {
                                ++completed;
                                r = 0;
                        }
 
+                       if (reset) {
+                               struct vhost_vring_state s = { .index = 0 };
+
+                               vq_reset(vq, vq->vring.num, &dev->vdev);
+
+                               r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+                                         &s);
+                               assert(!r);
+
+                               r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+                                         &null_state);
+                               assert(!r);
+
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &backend);
+                               assert(!r);
+
+                               started = completed;
+                               while (completed > next_reset)
+                                       next_reset += completed;
+                       }
                } while (r == 0);
-               if (completed == completed_before)
+               if (completed == completed_before && started == started_before)
                        ++spurious;
                assert(completed <= bufs);
                assert(started <= bufs);
@@ -203,7 +269,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
        test = 0;
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
-       fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+       fprintf(stderr,
+               "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+               spurious, started, completed);
 }
 
 const char optstring[] = "h";
@@ -245,6 +313,16 @@ const struct option longopts[] = {
                .val = 'd',
        },
        {
+               .name = "batch",
+               .val = 'b',
+               .has_arg = required_argument,
+       },
+       {
+               .name = "reset",
+               .val = 'r',
+               .has_arg = optional_argument,
+       },
+       {
        }
 };
 
@@ -255,6 +333,8 @@ static void help(void)
                " [--no-event-idx]"
                " [--no-virtio-1]"
                " [--delayed-interrupt]"
+               " [--batch=random/N]"
+               " [--reset=N]"
                "\n");
 }
 
@@ -263,6 +343,7 @@ int main(int argc, char **argv)
        struct vdev_info dev;
        unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+       long batch = 1, reset = 0;
        int o;
        bool delayed = false;
 
@@ -289,6 +370,24 @@ int main(int argc, char **argv)
                case 'D':
                        delayed = true;
                        break;
+               case 'b':
+                       if (!strcmp(optarg, "random")) {
+                               batch = RANDOM_BATCH;
+                       } else {
+                               batch = strtol(optarg, NULL, 10);
+                               assert(batch > 0);
+                               assert(batch < (long)INT_MAX + 1);
+                       }
+                       break;
+               case 'r':
+                       if (!optarg) {
+                               reset = 1;
+                       } else {
+                               reset = strtol(optarg, NULL, 10);
+                               assert(reset > 0);
+                               assert(reset < (long)INT_MAX + 1);
+                       }
+                       break;
                default:
                        assert(0);
                        break;
@@ -298,6 +397,6 @@ int main(int argc, char **argv)
 done:
        vdev_info_init(&dev, features);
        vq_info_add(&dev, 256);
-       run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+       run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
        return 0;
 }
index 2936534..fa87b58 100644 (file)
@@ -307,6 +307,7 @@ static int parallel_test(u64 features,
                close(to_host[0]);
 
                gvdev.vdev.features = features;
+               INIT_LIST_HEAD(&gvdev.vdev.vqs);
                gvdev.to_host_fd = to_host[1];
                gvdev.notifies = 0;
 
@@ -453,6 +454,7 @@ int main(int argc, char *argv[])
 
        getrange = getrange_iov;
        vdev.features = 0;
+       INIT_LIST_HEAD(&vdev.vqs);
 
        while (argv[1]) {
                if (strcmp(argv[1], "--indirect") == 0)