Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
authorDavid S. Miller <davem@davemloft.net>
Fri, 2 Apr 2021 18:00:46 +0000 (11:00 -0700)
committerDavid S. Miller <davem@davemloft.net>
Fri, 2 Apr 2021 18:00:46 +0000 (11:00 -0700)
Alexei Starovoitov says:

====================
pull-request: bpf 2021-04-01

The following pull-request contains BPF updates for your *net* tree.

We've added 11 non-merge commits during the last 8 day(s) which contain
a total of 10 files changed, 151 insertions(+), 26 deletions(-).

The main changes are:

1) xsk creation fixes, from Ciara.

2) bpf_get_task_stack fix, from Dave.

3) trampoline in modules fix, from Jiri.

4) bpf_obj_get fix for links and progs, from Lorenz.

5) struct_ops progs must be gpl compatible fix, from Toke.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
98 files changed:
Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
MAINTAINERS
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.h
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/geneve.c
drivers/net/ieee802154/atusb.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_fr.c
include/linux/avf/virtchnl.h
include/linux/virtio_net.h
include/net/netns/xfrm.h
include/net/red.h
include/net/sock.h
include/net/xfrm.h
include/uapi/linux/can.h
net/can/bcm.c
net/can/isotp.c
net/can/raw.c
net/core/neighbour.c
net/core/sock.c
net/core/xdp.c
net/dsa/dsa2.c
net/dsa/switch.c
net/ipv4/ah4.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/ip_vti.c
net/ipv4/udp.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_vti.c
net/mptcp/protocol.c
net/ncsi/ncsi-manage.c
net/nfc/llcp_sock.c
net/qrtr/qrtr.c
net/rds/message.c
net/sched/act_api.c
net/sched/sch_htb.c
net/tipc/socket.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh

index 79c38ea142372ab6e5befa4a940ff5d522a919ea..13c26f23a8209c018ee3b74f0bdf51da0af87688 100644 (file)
@@ -32,7 +32,7 @@ required:
   - interrupts
   - interrupt-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4b7d1e5d003c7f282aa1e61852ed4b7bcce4af1d..e8f04687a3e096b5430c9443cc95a356a28442a1 100644 (file)
@@ -49,7 +49,7 @@ properties:
     description:
       Reference to an nvmem node for the MAC address
 
-  nvmem-cells-names:
+  nvmem-cell-names:
     const: mac-address
 
   phy-connection-type:
index b921731cd970e35a9221cae0f33fb982209949df..df9e844dd6bc6e7bcb7c88d93bbbc4af3888f79e 100644 (file)
@@ -65,6 +65,71 @@ KSZ9031:
   step is 60ps. The default value is the neutral setting, so setting
   rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
+  The KSZ9031 hardware supports a range of skew values from negative to
+  positive, where the specific range is property dependent. All values
+  specified in the devicetree are offset by the minimum value so they
+  can be represented as positive integers in the devicetree since it's
+  difficult to represent a negative number in the devicetree.
+
+  The following 5-bit values table applies to rxc-skew-ps and txc-skew-ps.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0_0000               -900ps          0
+  0_0001               -840ps          60
+  0_0010               -780ps          120
+  0_0011               -720ps          180
+  0_0100               -660ps          240
+  0_0101               -600ps          300
+  0_0110               -540ps          360
+  0_0111               -480ps          420
+  0_1000               -420ps          480
+  0_1001               -360ps          540
+  0_1010               -300ps          600
+  0_1011               -240ps          660
+  0_1100               -180ps          720
+  0_1101               -120ps          780
+  0_1110               -60ps           840
+  0_1111               0ps             900
+  1_0000               60ps            960
+  1_0001               120ps           1020
+  1_0010               180ps           1080
+  1_0011               240ps           1140
+  1_0100               300ps           1200
+  1_0101               360ps           1260
+  1_0110               420ps           1320
+  1_0111               480ps           1380
+  1_1000               540ps           1440
+  1_1001               600ps           1500
+  1_1010               660ps           1560
+  1_1011               720ps           1620
+  1_1100               780ps           1680
+  1_1101               840ps           1740
+  1_1110               900ps           1800
+  1_1111               960ps           1860
+
+  The following 4-bit values table applies to the txdX-skew-ps, rxdX-skew-ps
+  data pads, and the rxdv-skew-ps, txen-skew-ps control pads.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0000                 -420ps          0
+  0001                 -360ps          60
+  0010                 -300ps          120
+  0011                 -240ps          180
+  0100                 -180ps          240
+  0101                 -120ps          300
+  0110                 -60ps           360
+  0111                 0ps             420
+  1000                 60ps            480
+  1001                 120ps           540
+  1010                 180ps           600
+  1011                 240ps           660
+  1100                 300ps           720
+  1101                 360ps           780
+  1110                 420ps           840
+  1111                 480ps           900
+
   Optional properties:
 
     Maximum value of 1860, default value 900:
@@ -120,11 +185,21 @@ KSZ9131:
 
 Examples:
 
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <1800>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <1800>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
        mdio {
                phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
+                       rxc-skew-ps = <1800>;
                        rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
+                       txc-skew-ps = <1800>;
                        txen-skew-ps = <0>;
                        reg = <0>;
                };
@@ -133,3 +208,20 @@ Examples:
                phy = <&phy0>;
                phy-mode = "rgmii-id";
        };
+
+References
+
+  Micrel ksz9021rl/rn Data Sheet, Revision 1.2. Dated 2/13/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/ksz9021rl-rn_ds.pdf
+
+  Micrel ksz9031rnx Data Sheet, Revision 2.1. Dated 11/20/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/KSZ9031RNX.pdf
+
+Notes:
+
+  Note that a previous version of the Micrel ksz9021rl/rn Data Sheet
+  was missing extended register 106 (transmit data pad skews), and
+  incorrectly specified the ps per step as 200ps/step instead of
+  120ps/step. The latest update to this document reflects the latest
+  revision of the Micrel specification even though usage in the kernel
+  still reflects that incorrect document.
index 8d23b0ec0c9080767310be12c1c52a57a252f124..1cc3976040d5f9893ee7343a57aa211440f0edd1 100644 (file)
@@ -14853,6 +14853,14 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/arm/arm-smmu/qcom_iommu.c
 
+QUALCOMM IPC ROUTER (QRTR) DRIVER
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     include/trace/events/qrtr.h
+F:     include/uapi/linux/qrtr.h
+F:     net/qrtr/
+
 QUALCOMM IPCC MAILBOX DRIVER
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
index 573b11559d733fc5328130fced5f910b8b81334e..28e916a04047d51548e5b3d1a22a86c1924ec379 100644 (file)
@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_unregister_candev;
+                       goto adap_dev_free;
        }
 
        /* get device number early */
@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
+adap_dev_free:
+       if (dev->adapter->dev_free)
+               dev->adapter->dev_free(dev);
+
 lbl_unregister_candev:
        unregister_candev(netdev);
 
index 52e865a3912cfc651edc9b5a87f2a8430121bf2d..809dfa3be6bb2340e385b4b1c526b181a430da28 100644 (file)
@@ -799,10 +799,15 @@ static int gswip_setup(struct dsa_switch *ds)
        /* Configure the MDIO Clock 2.5 MHz */
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
-       /* Disable the xMII link */
-       for (i = 0; i < priv->hw_info->max_ports; i++)
+       for (i = 0; i < priv->hw_info->max_ports; i++) {
+               /* Disable the xMII link */
                gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
 
+               /* Automatically select the xMII interface clock */
+               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK,
+                                  GSWIP_MII_CFG_RATE_AUTO, i);
+       }
+
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
                          GSWIP_FDMA_PCTRLp(cpu_port));
index ba8321ec1ee73eb70ee3d41f2552a263989fed0a..3305979a9f7c1fc43dba9efd9cec54d3c1286d07 100644 (file)
 #define XGBE_DMA_SYS_AWCR      0x30303030
 
 /* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR      0x00000003
-#define XGBE_DMA_PCI_AWCR      0x13131313
-#define XGBE_DMA_PCI_AWARCR    0x00000313
+#define XGBE_DMA_PCI_ARCR      0x000f0f0f
+#define XGBE_DMA_PCI_AWCR      0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR    0x00000f0f
 
 /* DMA channel interrupt modes */
 #define XGBE_IRQ_MODE_EDGE     0
index 6c85a10f465cd44a6c4a76af0be0389f92518a01..23a2ebdfd503b2e34ec0ecd798fc095db1b14b51 100644 (file)
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
        struct cudbg_buffer temp_buff = { 0 };
        struct sge_qbase_reg_field *sge_qbase;
        struct ireg_buf *ch_sge_dbg;
+       u8 padap_running = 0;
        int i, rc;
+       u32 size;
 
-       rc = cudbg_get_buff(pdbg_init, dbg_buff,
-                           sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
-                           &temp_buff);
+       /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+        * lead to SGE missing doorbells under heavy traffic. So, only
+        * collect them when adapter is idle.
+        */
+       for_each_port(padap, i) {
+               padap_running = netif_running(padap->port[i]);
+               if (padap_running)
+                       break;
+       }
+
+       size = sizeof(*ch_sge_dbg) * 2;
+       if (!padap_running)
+               size += sizeof(*sge_qbase);
+
+       rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;
 
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
                ch_sge_dbg++;
        }
 
-       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+           !padap_running) {
                sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
                /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
                 * SGE_QBASE_MAP[0-3]
index 98829e482bfa95f15b464122906a1a2201c32357..80882cfc370f5f00307874e97a06a91ee00cd264 100644 (file)
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1190, 0x1194,
                0x11a0, 0x11a4,
                0x11b0, 0x11b4,
-               0x11fc, 0x1274,
+               0x11fc, 0x123c,
+               0x1254, 0x1274,
                0x1280, 0x133c,
                0x1800, 0x18fc,
                0x3000, 0x302c,
index 1cf8ef717453dd46bbde2c98e1525618f789b8e8..3ec4d9fddd521bb3fb5f36e275f40c7e5a8bcb4c 100644 (file)
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
-       eth_mac_addr(dev, p);
+       int ret;
+
+       ret = eth_mac_addr(dev, p);
+       if (ret)
+               return ret;
 
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
index cd53981fa5e092469e3568ec4e6520ee14e23355..15f93b3550990b5e768b073a9f9171c64e61a2a7 100644 (file)
@@ -142,6 +142,7 @@ enum i40e_state_t {
        __I40E_VIRTCHNL_OP_PENDING,
        __I40E_RECOVERY_MODE,
        __I40E_VF_RESETS_DISABLED,      /* disable resets during i40e_remove */
+       __I40E_VFS_RELEASING,
        /* This must be last as it determines the size of the BITMAP */
        __I40E_STATE_SIZE__,
 };
index c70dec65a57264fd101cfd7419cc903469ea7ca0..96d5202a73e880c83fa876f35abfbf74e6a69635 100644 (file)
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
        I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
        I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+       I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
 #define I40E_PFC_STAT(_name, _stat) \
        I40E_STAT(struct i40e_pfc_stats, _name, _stat)
 #define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
        I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
 };
 
+struct i40e_cp_veb_tc_stats {
+       u64 tc_rx_packets;
+       u64 tc_rx_bytes;
+       u64 tc_tx_packets;
+       u64 tc_tx_bytes;
+};
+
 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
-       I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
-       I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
-       I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
-       I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
 
        /* Set flow control settings */
        ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
 
        switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+       struct i40e_cp_veb_tc_stats veb_tc = {
+               .tc_rx_packets = tc->tc_rx_packets[i],
+               .tc_rx_bytes = tc->tc_rx_bytes[i],
+               .tc_tx_packets = tc->tc_tx_packets[i],
+               .tc_tx_bytes = tc->tc_tx_bytes[i],
+       };
+
+       return veb_tc;
+}
+
 /**
  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
  * @pf: the PF device structure
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                               i40e_gstrings_veb_stats);
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-               i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
-                                      i40e_gstrings_veb_tc_stats);
+               if (veb_stats) {
+                       struct i40e_cp_veb_tc_stats veb_tc =
+                               i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+                       i40e_add_ethtool_stats(&data, &veb_tc,
+                                              i40e_gstrings_veb_tc_stats);
+               } else {
+                       i40e_add_ethtool_stats(&data, NULL,
+                                              i40e_gstrings_veb_tc_stats);
+               }
 
        i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
 
index 17f3b800640e0d3024de1d02468849dc14c8a1bc..af6c25fa493c281d8bd806c1d16222fb8809d079 100644 (file)
@@ -6738,9 +6738,9 @@ out:
                        set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
                        set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
                }
-       /* registers are set, lets apply */
-       if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
-               ret = i40e_hw_set_dcb_config(pf, new_cfg);
+               /* registers are set, lets apply */
+               if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+                       ret = i40e_hw_set_dcb_config(pf, new_cfg);
        }
 
 err:
@@ -10573,12 +10573,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                goto end_core_reset;
        }
 
-       if (!lock_acquired)
-               rtnl_lock();
-       ret = i40e_setup_pf_switch(pf, reinit);
-       if (ret)
-               goto end_unlock;
-
 #ifdef CONFIG_I40E_DCB
        /* Enable FW to write a default DCB config on link-up
         * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10593,7 +10587,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                        i40e_aq_set_dcb_parameters(hw, false, NULL);
                        dev_warn(&pf->pdev->dev,
                                 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
-                                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                } else {
                        i40e_aq_set_dcb_parameters(hw, true, NULL);
                        ret = i40e_init_pf_dcb(pf);
@@ -10607,6 +10601,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        }
 
 #endif /* CONFIG_I40E_DCB */
+       if (!lock_acquired)
+               rtnl_lock();
+       ret = i40e_setup_pf_switch(pf, reinit);
+       if (ret)
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
index 1b6ec9be155a6352eb1a330172c50b70b1bad98a..5d301a466f5c516cfd5ed745ee7981d12c4aab07 100644 (file)
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 {
+       struct i40e_pf *pf = vf->pf;
        int i;
 
        i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
         * ensure a reset.
         */
        for (i = 0; i < 20; i++) {
+               /* If PF is in VFs releasing state reset VF is impossible,
+                * so leave it.
+                */
+               if (test_bit(__I40E_VFS_RELEASING, pf->state))
+                       return;
                if (i40e_reset_vf(vf, false))
                        return;
                usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
 
        if (!pf->vf)
                return;
+
+       set_bit(__I40E_VFS_RELEASING, pf->state);
        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
                usleep_range(1000, 2000);
 
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                }
        }
        clear_bit(__I40E_VF_DISABLE, pf->state);
+       clear_bit(__I40E_VFS_RELEASING, pf->state);
 }
 
 #ifdef CONFIG_PCI_IOV
index fc32c5019b0f847fe18fa5c61493044c9a9e6e0c..12ca84113587d077b2633de91230cd8844217431 100644 (file)
@@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
        if (!nb_pkts)
-               return false;
+               return true;
 
        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
 
-       return true;
+       return nb_pkts < budget;
 }
 
 /**
index 357706444dd506aed7b5d86f80c1084f737a3602..17101c45cbcd847350c634191a1eb3ad22963d45 100644 (file)
@@ -196,7 +196,6 @@ enum ice_state {
        __ICE_NEEDS_RESTART,
        __ICE_PREPARED_FOR_RESET,       /* set by driver when prepared */
        __ICE_RESET_OICR_RECV,          /* set by driver after rcv reset OICR */
-       __ICE_DCBNL_DEVRESET,           /* set by dcbnl devreset */
        __ICE_PFR_REQ,                  /* set by driver and peers */
        __ICE_CORER_REQ,                /* set by driver and peers */
        __ICE_GLOBR_REQ,                /* set by driver and peers */
@@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
                    bool is_tun);
@@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
                          struct ice_rq_event_info *event);
 int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
 
index 3d9475e222cda6fc8c44505feacb38aa837d7e71..a20edf1538a0003c7c2b4c5f3bc83a236b6f6947 100644 (file)
@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 
                        if (!data) {
                                data = devm_kcalloc(ice_hw_to_dev(hw),
-                                                   sizeof(*data),
                                                    ICE_AQC_FW_LOG_ID_MAX,
+                                                   sizeof(*data),
                                                    GFP_KERNEL);
                                if (!data)
                                        return ICE_ERR_NO_MEMORY;
index faaa08e8171b58fe1d6f26835d9deeaecc7e0e6f..68866f4f0eb09ec5e4dc9fb364b3a334abd20a1b 100644 (file)
@@ -31,8 +31,8 @@ enum ice_ctl_q {
        ICE_CTL_Q_MAILBOX,
 };
 
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT       2500  /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT       10000 /* Count 10000 times */
 #define ICE_CTL_Q_SQ_CMD_USEC          100   /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT   10    /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC      100   /* Check every 100msec */
index e42727941ef539ea1115f03008b495e8d9c6b776..211ac6f907adb39e3d7c54029f128a3eacc6f73a 100644 (file)
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
 /**
  * ice_cee_to_dcb_cfg
  * @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
  *
  * Convert CEE configuration from firmware to DCB configuration
  */
 static void
 ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
-                  struct ice_dcbx_cfg *dcbcfg)
+                  struct ice_port_info *pi)
 {
        u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
        u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+       u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
        u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
-       u8 i, err, sync, oper, app_index, ice_app_sel_type;
        u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+       struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
        u16 ice_app_prot_id_type;
 
-       /* CEE PG data to ETS config */
+       dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+       dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+       dcbcfg->tlv_status = tlv_status;
+
+       /* CEE PG data */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
        /* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                }
        }
 
-       /* CEE PFC data to ETS config */
+       /* CEE PFC data */
        dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
        dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
 
+       /* CEE APP TLV data */
+       if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+               cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+       else
+               cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
        app_index = 0;
        for (i = 0; i < 3; i++) {
                if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                        ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
                        ice_app_sel_type = ICE_APP_SEL_TCPIP;
                        ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+                       for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+                               u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+                               u8 sel = cmp_dcbcfg->app[j].selector;
+
+                               if  (sel == ICE_APP_SEL_TCPIP &&
+                                    (prot_id == ICE_APP_PROT_ID_ISCSI ||
+                                     prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+                                       ice_app_prot_id_type = prot_id;
+                                       break;
+                               }
+                       }
                } else {
                        /* FIP APP */
                        ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
        ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
        if (!ret) {
                /* CEE mode */
-               dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
-               dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
-               dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
-               ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
                ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+               ice_cee_to_dcb_cfg(&cee_cfg, pi);
        } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
                /* CEE mode not enabled try querying IEEE data */
                dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
index 468a63f7eff929de38743deb13c119c1a0a13678..4180f1f35fb89caccb986278495ba091a6822e1f 100644 (file)
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
        while (ice_is_reset_in_progress(pf->state))
                usleep_range(1000, 2000);
 
-       set_bit(__ICE_DCBNL_DEVRESET, pf->state);
        dev_close(netdev);
        netdev_state_change(netdev);
        dev_open(netdev, NULL);
        netdev_state_change(netdev);
-       clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
 }
 
 /**
index 2dcfa685b76393aba429d791fd224f58633329c3..32ba71a1616520d97592eef9e69a1db816ae5b1a 100644 (file)
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
 
        /* Get WoL settings based on the HW capability */
-       if (ice_is_wol_supported(pf)) {
+       if (ice_is_wol_supported(&pf->hw)) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
        } else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
 
-       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
index 8d4e2ad4328d1abba195e7a441281b199391b63f..d13c7fc8fb0a24cbf4921bb3dd5f7534e77c1dc0 100644 (file)
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       err = ice_open(vsi->netdev);
+                       err = ice_open_internal(vsi->netdev);
 
                        if (!locked)
                                rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       ice_stop(vsi->netdev);
+                       ice_vsi_close(vsi);
 
                        if (!locked)
                                rtnl_unlock();
@@ -3078,7 +3078,6 @@ err_vsi:
 bool ice_is_reset_in_progress(unsigned long *state)
 {
        return test_bit(__ICE_RESET_OICR_RECV, state) ||
-              test_bit(__ICE_DCBNL_DEVRESET, state) ||
               test_bit(__ICE_PFR_REQ, state) ||
               test_bit(__ICE_CORER_REQ, state) ||
               test_bit(__ICE_GLOBR_REQ, state);
index 2c23c8f468a5494994f615782897ae3a95701472..9f1adff85be74e4b5e6a2c6110036ea0c7d224a6 100644 (file)
@@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
  *
  * Check if WoL is supported based on the HW configuration.
  * Returns true if NVM supports and enables WoL for this port, false otherwise
  */
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
 {
-       struct ice_hw *hw = &pf->hw;
        u16 wol_ctrl;
 
        /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
        if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
                return false;
 
-       return !(BIT(hw->pf_id) & wol_ctrl);
+       return !(BIT(hw->port_info->lport) & wol_ctrl);
 }
 
 /**
@@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                goto err_send_version_unroll;
        }
 
+       /* not a fatal error if this fails */
        err = ice_init_nvm_phy_type(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
+       /* not a fatal error if this fails */
        err = ice_update_link_info(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_update_link_info failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
        ice_init_link_dflt_override(pf->hw.port_info);
 
        /* if media available, initialize PHY settings */
        if (pf->hw.port_info->phy.link_info.link_info &
            ICE_AQ_MEDIA_AVAILABLE) {
+               /* not a fatal error if this fails */
                err = ice_init_phy_user_cfg(pf->hw.port_info);
-               if (err) {
+               if (err)
                        dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-                       goto err_send_version_unroll;
-               }
 
                if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
                        struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -6635,6 +6631,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * Returns 0 on success, negative value on failure
  */
 int ice_open(struct net_device *netdev)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't open net device while reset is in progress");
+               return -EBUSY;
+       }
+
+       return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be used directly except for ice_open and reset
+ * handling routine
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
@@ -6715,6 +6733,12 @@ int ice_stop(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't stop net device while reset is in progress");
+               return -EBUSY;
+       }
 
        ice_vsi_close(vsi);
 
index 67c965a3f5d28a5da9ddf4e2a67e2d696b9711b3..834cbd3f7b31945b2a2c59971bebbdf31ee0bfde 100644 (file)
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                        ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
                                                vsi_list_id);
 
+               if (!m_entry->vsi_list_info)
+                       return ICE_ERR_NO_MEMORY;
+
                /* If this entry was large action then the large action needs
                 * to be updated to point to FWD to VSI list
                 */
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
        return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
                 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
                (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+                fm_entry->vsi_list_info &&
                 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                return ICE_ERR_PARAM;
 
        list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
-               struct ice_fltr_info *fi;
-
-               fi = &fm_entry->fltr_info;
-               if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+               if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
                        continue;
 
                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
-                                                       vsi_list_head, fi);
+                                                       vsi_list_head,
+                                                       &fm_entry->fltr_info);
                if (status)
                        return status;
        }
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                                          &remove_list_head);
        mutex_unlock(rule_lock);
        if (status)
-               return;
+               goto free_fltr_list;
 
        switch (lkup) {
        case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                break;
        }
 
+free_fltr_list:
        list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
                list_del(&fm_entry->list_entry);
                devm_kfree(ice_hw_to_dev(hw), fm_entry);
index a6cb0c35748c5fe9fec80d93341da4ed63cacf18..266036b7a49ab2fd1ccaa15081cc4de82fc990e1 100644 (file)
@@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_TLV_STATUS_ERR     0x4
 #define ICE_APP_PROT_ID_FCOE   0x8906
 #define ICE_APP_PROT_ID_ISCSI  0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
 #define ICE_APP_PROT_ID_FIP    0x8914
 #define ICE_APP_SEL_ETHTYPE    0x1
 #define ICE_APP_SEL_TCPIP      0x2
index b051417ede67bdff2fd6bb732f01ba51a5206b4f..9153c9bda96fa5bf9a0abfd3dba47c0d9994cd47 100644 (file)
@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
 }
 
 enum {
-       MLX5_INTERFACE_PROTOCOL_ETH_REP,
        MLX5_INTERFACE_PROTOCOL_ETH,
+       MLX5_INTERFACE_PROTOCOL_ETH_REP,
 
+       MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,
-       MLX5_INTERFACE_PROTOCOL_IB,
 
        MLX5_INTERFACE_PROTOCOL_VNET,
 };
index 304b296fe8b989210824b62606bd934b0972c9c4..bc6f77ea0a31f1f2a945e7a87d221cf331300970 100644 (file)
@@ -516,6 +516,7 @@ struct mlx5e_icosq {
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
+       u16                        reserved_room;
        unsigned long              state;
 
        /* control path */
index b2cd29847a371ee3dfb4b3b74a2d1154c88ab535..68e54cc1cd1664261d4b596d2da3b596a0171f1c 100644 (file)
@@ -185,6 +185,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
        return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+                      u32 *labels, u32 *id)
+{
+       if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+               *id = 0;
+               return 0;
+       }
+
+       if (mapping_add(ct_priv->labels_mapping, labels, id))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+       if (id)
+               mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
        mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
 }
 
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (!meta)
                return -EOPNOTSUPP;
 
-       err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-                         &attr->ct_attr.ct_labels_id);
+       err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+                                    &attr->ct_attr.ct_labels_id);
        if (err)
                return -EOPNOTSUPP;
        if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 
 err_mapping:
        dealloc_mod_hdr_actions(&mod_acts);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        return err;
 }
 
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
        kfree(attr);
 err_attr:
@@ -1197,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
        if (!priv || !ct_attr->ct_labels_id)
                return;
 
-       mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+       mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1280,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
                ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
                ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
                ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-               if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+               if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
                        return -EOPNOTSUPP;
                mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
                                            MLX5_CT_LABELS_MASK);
index 67de2bf36861d2c006c2c7335536cf3e53138103..e1271998b937917133e1dc12ea403f110dae66ce 100644 (file)
@@ -21,6 +21,11 @@ enum {
        MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
+struct mlx5e_encap_key {
+       const struct ip_tunnel_key *ip_tun_key;
+       struct mlx5e_tc_tunnel     *tc_tunnel;
+};
+
 struct mlx5e_tc_tunnel {
        int tunnel_type;
        enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
                            struct flow_cls_offload *f,
                            void *headers_c,
                            void *headers_v);
+       bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+                                struct mlx5e_encap_key *b);
 };
 
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
                                 void *headers_c,
                                 void *headers_v);
 
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b);
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif //__MLX5_EN_TC_TUNNEL_H__
index 7f7b0f6dcdf954f87d5681454f9082c063fac387..9f16ad2c0710bff4e4056dd5c769445dedd2225c 100644 (file)
@@ -476,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
        mlx5e_decap_dealloc(priv, d);
 }
 
-struct encap_key {
-       const struct ip_tunnel_key *ip_tun_key;
-       struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
-                         struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b)
 {
-       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-               a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+               a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
 }
 
 static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -494,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
        return memcmp(&a->key, &b->key, sizeof(b->key));
 }
 
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
 {
        return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
                     key->tc_tunnel->tunnel_type);
@@ -516,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
 }
 
 static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
                uintptr_t hash_key)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_encap_key e_key;
        struct mlx5e_encap_entry *e;
-       struct encap_key e_key;
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                e_key.ip_tun_key = &e->tun_info->key;
                e_key.tc_tunnel = e->tunnel;
-               if (!cmp_encap_info(&e_key, key) &&
+               if (e->tunnel->encap_info_equal(&e_key, key) &&
                    mlx5e_encap_take(e))
                        return e;
        }
@@ -694,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_flow_attr *attr = flow->attr;
        const struct ip_tunnel_info *tun_info;
        unsigned long tbl_time_before = 0;
-       struct encap_key key;
        struct mlx5e_encap_entry *e;
+       struct mlx5e_encap_key key;
        bool entry_created = false;
        unsigned short family;
        uintptr_t hash_key;
index 7ed3f9f79f11ac95ce86f9b1c8630b43c689d2e7..f5b26f5a7de46054ae57d4605c3513210bee5336 100644 (file)
@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
        return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+                                                struct mlx5e_encap_key *b)
+{
+       struct ip_tunnel_info *a_info;
+       struct ip_tunnel_info *b_info;
+       bool a_has_opts, b_has_opts;
+
+       if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+               return false;
+
+       a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+       b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+       /* keys are equal when both don't have any options attached */
+       if (!a_has_opts && !b_has_opts)
+               return true;
+
+       if (a_has_opts != b_has_opts)
+               return false;
+
+       /* geneve options stored in memory next to ip_tunnel_info struct */
+       a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+       b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+       return a_info->options_len == b_info->options_len &&
+               memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
 struct mlx5e_tc_tunnel geneve_tunnel = {
        .tunnel_type          = MLX5E_TC_TUNNEL_TYPE_GENEVE,
        .match_level          = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_geneve,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_geneve,
        .parse_tunnel         = mlx5e_tc_tun_parse_geneve,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_geneve,
 };
index 2805416c32a3cbfa373c0b08706d88f182ff4669..ada14f0574dc6cb1d7d940a69e8dbddc7a75c337 100644 (file)
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_gretap,
        .parse_udp_ports      = NULL,
        .parse_tunnel         = mlx5e_tc_tun_parse_gretap,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 3479672e84cf4659cd0157a61ecfdec7b7d709af..60952b33b5688835ddd5bfdddc2a0e30fa56fd5f 100644 (file)
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
        .generate_ip_tun_hdr  = generate_ip_tun_hdr,
        .parse_udp_ports      = parse_udp_ports,
        .parse_tunnel         = parse_tunnel,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 038a0f1cecec63eb3199306d75fd888bde24dd12..4267f3a1059e7f4933e2da58d33df186dc3797ee 100644 (file)
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_vxlan,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_vxlan,
        .parse_tunnel         = mlx5e_tc_tun_parse_vxlan,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 2371b83dad9ca86e344795535ae31572135325e2..055c3bc2373393dd656ef8a03372f02c81fd6aaf 100644 (file)
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
        return wqe_size * 2 - 1;
 }
 
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+       u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+       return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
 #endif
index d06532d0baa430e7970190a58969507a98ef3fa4..19d22a63313f38e7cabae20a5e31898a5f426f7b 100644 (file)
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
        struct accel_rule rule;
        struct sock *sk;
-       struct mlx5e_rq_stats *stats;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_tls_sw_stats *sw_stats;
        struct completion add_ctx;
        u32 tirn;
        u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_static_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_progress_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
        return err;
 
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        err = PTR_ERR(cseg);
        complete(&priv_rx->add_ctx);
        goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        buf->priv_rx = priv_rx;
 
-       BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
        spin_lock_bh(&sq->channel->async_icosq_lock);
 
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
                spin_unlock_bh(&sq->channel->async_icosq_lock);
                err = -ENOSPC;
                goto err_dma_unmap;
        }
 
-       pi = mlx5e_icosq_get_next_pi(sq, 1);
+       pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
        wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
 
 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        wi = (struct mlx5e_icosq_wqe_info) {
                .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
-               .num_wqebbs = 1,
+               .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
                .tls_get_params.buf = buf,
        };
        icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
 err_free:
        kfree(buf);
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        return err;
 }
 
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
        cseg = post_static_params(sq, priv_rx);
        if (IS_ERR(cseg)) {
-               priv_rx->stats->tls_resync_res_skip++;
+               priv_rx->rq_stats->tls_resync_res_skip++;
                err = PTR_ERR(cseg);
                goto unlock;
        }
        /* Do not increment priv_rx refcnt, CQE handling is empty */
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-       priv_rx->stats->tls_resync_res_ok++;
+       priv_rx->rq_stats->tls_resync_res_ok++;
 unlock:
        spin_unlock_bh(&c->async_icosq_lock);
 
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
        auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
        if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
-               priv_rx->stats->tls_resync_req_skip++;
+               priv_rx->rq_stats->tls_resync_req_skip++;
                goto out;
        }
 
        hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
        tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
-       priv_rx->stats->tls_resync_req_end++;
+       priv_rx->rq_stats->tls_resync_req_end++;
 out:
        mlx5e_ktls_priv_rx_put(priv_rx);
        dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->rxq = rxq;
        priv_rx->sk = sk;
 
-       priv_rx->stats = &priv->channel_stats[rxq].rq;
+       priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+       priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
        rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_post_wqes;
 
-       priv_rx->stats->tls_ctx++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
 
        return 0;
 
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
        if (cancel_work_sync(&resync->work))
                mlx5e_ktls_priv_rx_put(priv_rx);
 
-       priv_rx->stats->tls_del++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
        if (priv_rx->rule.rule)
                mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
index d16def68ecff7214c02985ae934c45d059a9e13a..51bdf71073f31f691515e39b392f352a87c49044 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
 
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct mlx5e_tls_sw_stats *sw_stats;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_create_key;
 
+       priv_tx->sw_stats = &priv->tls->sw_stats;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                goto err_create_tis;
 
        priv_tx->ctx_post_pending = true;
+       atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
        return 0;
 
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 
        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-               stats->tls_ctx++;
        }
 
        seq = ntohl(tcp_hdr(skb)->seq);
index bd270a85c8044074b6efcb1c1b17c7bf3bbbeb76..4c9274d390da1d9345474a2a5efc06319b3d3ab3 100644 (file)
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+       atomic64_t tx_tls_ctx;
        atomic64_t tx_tls_drop_metadata;
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
        atomic64_t tx_tls_drop_bypass_required;
+       atomic64_t rx_tls_ctx;
+       atomic64_t rx_tls_del;
        atomic64_t rx_tls_drop_resync_request;
        atomic64_t rx_tls_resync_request;
        atomic64_t rx_tls_resync_reply;
index b949b9a7538b0d1ee8b5e996703dec76334ab6b3..29463bdb77159c1a5b8813f31dd95a60838a448e 100644 (file)
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
        atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-       return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+       if (!priv->tls)
+               return NULL;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return mlx5e_ktls_sw_stats_desc;
+       return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-       if (!is_tls_atomic_stats(priv))
+       if (!priv->tls)
                return 0;
-
-       return NUM_TLS_SW_COUNTERS;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+       return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-       unsigned int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      mlx5e_tls_sw_stats_desc[i].format);
+                      stats_desc[i].format);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-       int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                data[idx++] =
                    MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-                                           mlx5e_tls_sw_stats_desc, i);
+                                           stats_desc, i);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
index f5f2a8fd004695def84fde768e7268ec297fb4bd..53802e18af900cfc4456cea8cc4211a92f62ed7e 100644 (file)
@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-                                                  u32 eth_proto_cap,
-                                                  u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+                                                  struct ethtool_link_ksettings *link_ksettings,
+                                                  u32 eth_proto_cap, u8 connector_type)
 {
-       if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+       if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
                if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
                                   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
                                   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
                [MLX5E_PORT_OTHER]              = PORT_OTHER,
        };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-       if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
                return ptys2connector_type[connector_type];
 
        if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                         data_rate_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-       link_ksettings->base.port = get_connector_port(eth_proto_oper,
-                                                      connector_type, ext);
-       ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-                                              connector_type, ext);
+       connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+                        connector_type : MLX5E_PORT_UNKNOWN;
+       link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+       ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+                                              connector_type);
        get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
        if (an_status == MLX5_AN_COMPLETE)
index 158f947a85031ea0147e5d50f8c259813f87733a..5db63b9f3b70d99a36d2f6a6e6043ee6f085b74f 100644 (file)
@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->reserved_room = param->stop_room;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
        mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+                                         struct mlx5e_params *params,
+                                         u8 log_wq_size,
+                                         struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+
+       /* async_icosq is used by XSK only if xdp_prog is active */
+       if (params->xdp_prog)
+               param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+       MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param)
@@ -2398,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+       mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
index 92c5b81427b971f81817f4149c662fb9e92e71e4..88a01c59ce612083fff1031d57545729e30287b2 100644 (file)
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
-       s->rx_tls_ctx                 += rq_stats->tls_ctx;
-       s->rx_tls_del                 += rq_stats->tls_del;
        s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
-       s->tx_tls_ctx               += sq_stats->tls_ctx;
        s->tx_tls_ooo               += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
index 93c41312fb037ac36b00e229fadeafbe708bcb99..adf9b7b8b71201d5bb63da405aaa5c76bada12f5 100644 (file)
@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tx_tls_encrypted_packets;
        u64 tx_tls_encrypted_bytes;
-       u64 tx_tls_ctx;
        u64 tx_tls_ooo;
        u64 tx_tls_dump_packets;
        u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
 
        u64 rx_tls_decrypted_packets;
        u64 rx_tls_decrypted_bytes;
-       u64 rx_tls_ctx;
-       u64 rx_tls_del;
        u64 rx_tls_resync_req_pkt;
        u64 rx_tls_resync_req_start;
        u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_decrypted_packets;
        u64 tls_decrypted_bytes;
-       u64 tls_ctx;
-       u64 tls_del;
        u64 tls_resync_req_pkt;
        u64 tls_resync_req_start;
        u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_encrypted_packets;
        u64 tls_encrypted_bytes;
-       u64 tls_ctx;
        u64 tls_ooo;
        u64 tls_dump_packets;
        u64 tls_dump_bytes;
index 174dfbc996c6164de252648c6f140232a63fada4..1fa9c18563da911085c4ab2de44c0e08bced092d 100644 (file)
@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+       int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+                     MLX5_CAP_GEN(dev, max_num_eqs) :
+                     1 << MLX5_CAP_GEN(dev, log_max_eq);
        int err;
 
        eq_table->num_comp_eqs =
-               mlx5_irq_get_num_comp(eq_table->irq_table);
+               min_t(int,
+                     mlx5_irq_get_num_comp(eq_table->irq_table),
+                     num_eqs - MLX5_MAX_ASYNC_EQS);
 
        err = create_async_eqs(dev);
        if (err) {
index 8694b83968b4c4fae3a681b8c39200407a8b1242..d4a2f8d1ee9f154d0ce71cb4ff5ca983b6635d88 100644 (file)
@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
        return i;
 }
 
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+       return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+              mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+              MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
 static int
 esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
@@ -550,9 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
        int err = 0;
 
        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
-           MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-           mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-           MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+           esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
        if (attr->dest_ft) {
@@ -1716,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;
 
-       /* meta send to vport */
-       memset(flow_group_in, 0, inlen);
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_MISC_PARAMETERS_2);
-
-       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       if (esw_src_port_rewrite_supported(esw)) {
+               /* meta send to vport */
+               memset(flow_group_in, 0, inlen);
+               MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                        MLX5_MATCH_MISC_PARAMETERS_2);
 
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+               match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
-       num_vfs = esw->esw_funcs.num_vfs;
-       if (num_vfs) {
-               MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-               MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
-               ix += num_vfs;
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_0,
+                        mlx5_eswitch_get_vport_metadata_mask());
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
 
-               g = mlx5_create_flow_group(fdb, flow_group_in);
-               if (IS_ERR(g)) {
-                       err = PTR_ERR(g);
-                       esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
-                                err);
-                       goto send_vport_meta_err;
+               num_vfs = esw->esw_funcs.num_vfs;
+               if (num_vfs) {
+                       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+                       MLX5_SET(create_flow_group_in, flow_group_in,
+                                end_flow_index, ix + num_vfs - 1);
+                       ix += num_vfs;
+
+                       g = mlx5_create_flow_group(fdb, flow_group_in);
+                       if (IS_ERR(g)) {
+                               err = PTR_ERR(g);
+                               esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+                                        err);
+                               goto send_vport_meta_err;
+                       }
+                       esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+                       err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+                       if (err)
+                               goto meta_rule_err;
                }
-               esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
-               err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
-               if (err)
-                       goto meta_rule_err;
        }
 
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
index d9d9e1f488f94e7ba2498b8e87ad9ea263badf55..ba28ac7e79bcd5cbde98df6d77412452dcf0ac24 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/red.h>
 #include <net/vxlan.h>
 #include <net/flow_offload.h>
+#include <net/inet_ecn.h>
 
 #include "port.h"
 #include "core.h"
@@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops {
        u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+                                          bool *trap_en)
+{
+       bool set_ce = false;
+
+       *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+       if (set_ce)
+               return INET_ECN_CE;
+       else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+               return INET_ECN_ECT_1;
+       else
+               return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
index 6ccca39bae84529e8167c1e99a633d01a69c84aa..64a8f838eb53238cdba0925eb6f4b715730c7a9f 100644 (file)
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
                                            u8 inner_ecn, u8 outer_ecn)
 {
        char tidem_pl[MLXSW_REG_TIDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
index e5ec595593f45e6c12238d6468e43c899d220560..9eba8fa684aee1fa0fcf8d7384052a800848d4f5 100644 (file)
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
                                         u8 inner_ecn, u8 outer_ecn)
 {
        char tndem_pl[MLXSW_REG_TNDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
index 1634ca6d4a8f02ec9e86c287d5eb0d4807648dc5..c84c8bf2bc20eaefb6711b5836652502f9196da8 100644 (file)
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
                        dev_kfree_skb_any(curr);
                        if (segs != NULL) {
                                curr = segs;
-                               segs = segs->next;
+                               segs = next;
                                curr->next = NULL;
                                dev_kfree_skb_any(segs);
                        }
index 0e2db6ea79e96f7e9daca0fdcd84ce7b4483b0ed..2ec62c8d86e1c1858d9cce0efcfab055f2ef9add 100644 (file)
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
+               return;
        }
 
        nfp_ccm_rx(&bpf->ccm, skb);
index caf12eec99459ab01c03d23df38eb6fbf4578051..56833a41f3d27b41de4b21a8685bb7a17dcfc4b5 100644 (file)
@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
  * @qos_rate_limiters: Current active qos rate limiters
  * @qos_stats_lock:    Lock on qos stats updates
  * @pre_tun_rule_cnt:  Number of pre-tunnel rules offloaded
+ * @merge_table:       Hash table to store merged flows
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
        unsigned int qos_rate_limiters;
        spinlock_t qos_stats_lock; /* Protect the qos stats */
        int pre_tun_rule_cnt;
+       struct rhashtable merge_table;
 };
 
 /**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+       u64 parent_ctx;
+       struct rhash_head ht_node;
+};
 
 struct nfp_fl_stats_frame {
        __be32 stats_con_id;
index aa06fcb38f8b993a1f6c684d2f9c66eb98e3e429..327bb56b3ef5696ff894776985f2c73f0726238a 100644 (file)
@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
        .automatic_shrinking    = true,
 };
 
+const struct rhashtable_params merge_table_params = {
+       .key_offset     = offsetof(struct nfp_merge_info, parent_ctx),
+       .head_offset    = offsetof(struct nfp_merge_info, ht_node),
+       .key_len        = sizeof(u64),
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_num_mems)
 {
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                goto err_free_flow_table;
 
+       err = rhashtable_init(&priv->merge_table, &merge_table_params);
+       if (err)
+               goto err_free_stats_ctx_table;
+
        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
        /* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
-               goto err_free_stats_ctx_table;
+               goto err_free_merge_table;
 
        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -550,6 +560,8 @@ err_free_last_used:
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+       rhashtable_destroy(&priv->merge_table);
 err_free_stats_ctx_table:
        rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
                                    nfp_check_rhashtable_empty, NULL);
        rhashtable_free_and_destroy(&priv->stats_ctx_table,
                                    nfp_check_rhashtable_empty, NULL);
+       rhashtable_free_and_destroy(&priv->merge_table,
+                                   nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
index d72225d64a75da13a123a3fcf7d362b5310a3a4f..e95969c462e46de7403272452fcde5633b203fe4 100644 (file)
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
+       struct nfp_merge_info *merge_info;
+       u64 parent_ctx = 0;
        int err;
 
        ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;
 
+       /* check if the two flows are already merged */
+       parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+       parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+       if (rhashtable_lookup_fast(&priv->merge_table,
+                                  &parent_ctx, merge_table_params)) {
+               nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+               return 0;
+       }
+
        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        if (err)
                goto err_release_metadata;
 
+       merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+       if (!merge_info) {
+               err = -ENOMEM;
+               goto err_remove_rhash;
+       }
+       merge_info->parent_ctx = parent_ctx;
+       err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+                                    merge_table_params);
+       if (err)
+               goto err_destroy_merge_info;
+
        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
-               goto err_remove_rhash;
+               goto err_remove_merge_info;
 
        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;
 
        return 0;
 
+err_remove_merge_info:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                           &merge_info->ht_node,
+                                           merge_table_params));
+err_destroy_merge_info:
+       kfree(merge_info);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
+       struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
+       u64 parent_ctx = 0;
        bool mod = false;
        int err;
 
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
-                                merge_flow.list)
+                                merge_flow.list) {
+               u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+               parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
+       }
+
+       merge_info = rhashtable_lookup_fast(&priv->merge_table,
+                                           &parent_ctx,
+                                           merge_table_params);
+       if (merge_info) {
+               WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                                   &merge_info->ht_node,
+                                                   merge_table_params));
+               kfree(merge_info);
+       }
 
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
index 1e966a39967e5f8eb885c109d857be722158c343..aca7f82f6791b6f7551df1720b349e69af41e7cc 100644 (file)
@@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
        return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
 }
 
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
 /**
  * axienet_iow - Memory mapped Axi Ethernet register write
  * @lp:         Pointer to axienet local structure
index 5d677db0aee5dd0c071a387a9a914d63e7c3cac1..f8f8654ea728c24dcefca36cdabfd0256ff6078b 100644 (file)
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        ret = axienet_device_reset(ndev);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
        if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
        }
 
        /* Do a reset to ensure DMA is really stopped */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        cancel_work_sync(&lp->dma_err_task);
 
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
index 4ac0373326efd995171b1179bd7a5e7a202b89a2..d5b1e48e0c090277b201fe266bf18a9430aae51a 100644 (file)
@@ -908,8 +908,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
                info = skb_tunnel_info(skb);
                if (info) {
-                       info->key.u.ipv4.dst = fl4.saddr;
-                       info->key.u.ipv4.src = fl4.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(&rt->dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv4.dst = fl4.saddr;
+                       unclone->key.u.ipv4.src = fl4.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -993,8 +1001,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
                if (info) {
-                       info->key.u.ipv6.dst = fl6.saddr;
-                       info->key.u.ipv6.src = fl6.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv6.dst = fl6.saddr;
+                       unclone->key.u.ipv6.src = fl6.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
index 0dd0ba915ab970cf7a142a57279c9271c22c84a9..23ee0b14cbfa1f39f5d3a828d2cad130b456bffd 100644 (file)
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
                        return -ENOMEM;
                }
                usb_anchor_urb(urb, &atusb->idle_urbs);
+               usb_free_urb(urb);
                n--;
        }
        return 0;
index 53282a6d5928f1f426f5a1e592f72a2e8d1e536f..287cccf8f7f4e5417d2750609e40ec9e393cee2a 100644 (file)
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
 
 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 {
-       int val;
+       int val, mask = 0;
 
        /* Enable EEE at PHY level */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        if (val < 0)
                return val;
 
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_1000T;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_100TX;
+
        if (enable)
-               val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val |= mask;
        else
-               val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val &= ~mask;
 
        phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
 
index 666dd201c3d5fac335d9251456bf408675928376..53dbc67e8a34f2535cead882faf4f4e19b9d3810 100644 (file)
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin.sin_addr;
                                dst = local_ip.sin.sin_addr;
-                               info->key.u.ipv4.src = src.s_addr;
-                               info->key.u.ipv4.dst = dst.s_addr;
+                               unclone->key.u.ipv4.src = src.s_addr;
+                               unclone->key.u.ipv4.dst = dst.s_addr;
                        }
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
                        dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in6_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin6.sin6_addr;
                                dst = local_ip.sin6.sin6_addr;
-                               info->key.u.ipv6.src = src;
-                               info->key.u.ipv6.dst = dst;
+                               unclone->key.u.ipv6.src = src;
+                               unclone->key.u.ipv6.dst = dst;
                        }
 
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
index 0720f5f92caa7b0bfcb80ffb432340356ea7034c..4d9dc7d159089c2ee12e87c8cffb167ef31af753 100644 (file)
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (pad > 0) { /* Pad the frame with zeros */
                        if (__skb_pad(skb, pad, false))
-                               goto drop;
+                               goto out;
                        skb_put(skb, pad);
                }
        }
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
        kfree_skb(skb);
+out:
+       dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
 
index 40bad71865ea762869e917d2174aa2ec6cb31286..532bcbfc471616f01a08a8356fc954f91c36c257 100644 (file)
@@ -476,7 +476,6 @@ struct virtchnl_rss_key {
        u16 vsi_id;
        u16 key_len;
        u8 key[1];         /* RSS hash key, packed bytes */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
@@ -485,7 +484,6 @@ struct virtchnl_rss_lut {
        u16 vsi_id;
        u16 lut_entries;
        u8 lut[1];        /* RSS lookup table */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
index 6b5fcfa1e5553576b0e853ae31a2df655c04204b..98775d7fa69632e2c2da30b581a666f7fbb94b64 100644 (file)
@@ -62,6 +62,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       skb_reset_mac_header(skb);
+
        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
                u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
index 59f45b1e9dac06386fdff3eb78016f2da15d4f67..e816b6a3ef2b0ef28ce9ffc0a6bbb8e6b419f567 100644 (file)
@@ -72,7 +72,9 @@ struct netns_xfrm {
 #if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
-       spinlock_t xfrm_state_lock;
+       spinlock_t              xfrm_state_lock;
+       seqcount_spinlock_t     xfrm_state_hash_generation;
+
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
 };
index 0b39eff1d50aebc43784ebec3fe615fec0bf995c..be11dbd26492094e8b477339c66b3a098e06c39f 100644 (file)
@@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
 static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
                                    u8 Scell_log, u8 *stab)
 {
-       if (fls(qth_min) + Wlog > 32)
+       if (fls(qth_min) + Wlog >= 32)
                return false;
-       if (fls(qth_max) + Wlog > 32)
+       if (fls(qth_max) + Wlog >= 32)
                return false;
        if (Scell_log >= 32)
                return false;
index 0b6266fd6bf6f4496b09dd170869ff4db38dfeb9..8487f58da36d21335f690edd2194986c3d4fed23 100644 (file)
@@ -934,9 +934,13 @@ static inline void sk_acceptq_added(struct sock *sk)
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
+/* Note: If you think the test should be:
+ *     return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+       return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
@@ -2221,6 +2225,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+               skb_orphan(skb);
+               skb->destructor = sock_efree;
+               skb->sk = sk;
+       }
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires);
 
index b2a06f10b62ce11fb2618b3b4c6001d8a2e757b6..c58a6d4eb61033d222dd22c2242e321f286b3d93 100644 (file)
@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                return __xfrm_policy_check(sk, ndir, skb, family);
 
        return  (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
-               (skb_dst(skb)->flags & DST_NOPOLICY) ||
+               (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
                __xfrm_policy_check(sk, ndir, skb, family);
 }
 
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
 #if IS_ENABLED(CONFIG_NET_PKTGEN)
index f75238ac6dced4cbc59fb6a4d1fb1fe47113c14c..c7535352fef646937a923bddf1d9656e9edcb982 100644 (file)
@@ -113,7 +113,7 @@ struct can_frame {
                 */
                __u8 len;
                __u8 can_dlc; /* deprecated */
-       };
+       } __attribute__((packed)); /* disable padding added in some ABIs */
        __u8 __pad; /* padding */
        __u8 __res0; /* reserved / padding */
        __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
index 0e5c37be4a2bd0a53afef34ab1805c18d768c72b..909b9e684e04305c19593278c30d93ec8660b8ee 100644 (file)
@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-proto-2");
 
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 /*
  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                /* no bound device as default => check msg_name */
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < BCM_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
        struct net *net = sock_net(sk);
        int ret = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < BCM_MIN_NAMELEN)
                return -EINVAL;
 
        lock_sock(sk);
@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(BCM_MIN_NAMELEN);
+               msg->msg_namelen = BCM_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index 15ea1234d45730232941a1bbbe441466f865af46..9f94ad3caee92938a81e0fb22e850ec97d73b9c9 100644 (file)
@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
 MODULE_ALIAS("can-proto-6");
 
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
+
 #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
                         (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
                         (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
@@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_timestamp(msg, sk, skb);
 
        if (msg->msg_name) {
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(ISOTP_MIN_NAMELEN);
+               msg->msg_namelen = ISOTP_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int notify_enetdown = 0;
        int do_rx_reg = 1;
 
-       if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+       if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
        /* do not register frame reception for functional addressing */
@@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, ISOTP_MIN_NAMELEN);
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
        addr->can_addr.tp.tx_id = so->txid;
 
-       return sizeof(*addr);
+       return ISOTP_MIN_NAMELEN;
 }
 
 static int isotp_setsockopt(struct socket *sock, int level, int optname,
index 37b47a39a3edcc22cb3923fed23a2d6cb6ef0b96..139d9471ddcf44754a2806e4b47d4c275690089f 100644 (file)
@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 MODULE_ALIAS("can-proto-1");
 
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 #define MASK_ALL 0
 
 /* A raw socket has a list of can_filters attached to it, each receiving
@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int err = 0;
        int notify_enetdown = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < RAW_MIN_NAMELEN)
                return -EINVAL;
        if (addr->can_family != AF_CAN)
                return -EINVAL;
@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, RAW_MIN_NAMELEN);
        addr->can_family  = AF_CAN;
        addr->can_ifindex = ro->ifindex;
 
-       return sizeof(*addr);
+       return RAW_MIN_NAMELEN;
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
@@ -739,7 +741,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < RAW_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -832,8 +834,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(RAW_MIN_NAMELEN);
+               msg->msg_namelen = RAW_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index e2982b3970b88dfa671c7d7da23bd55154f8988c..8379719d1dcef1bb1ea0b673d3dcf1030380439c 100644 (file)
@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                         * we can reinject the packet there.
                         */
                        n2 = NULL;
-                       if (dst) {
+                       if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
index cc31b601ae103387fdcdc83713f31b726ec25fc7..5ec90f99e102895417adb6422c00a6943fd09182 100644 (file)
@@ -2132,16 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
        if (skb_is_tcp_pure_ack(skb))
                return;
 
-       if (can_skb_orphan_partial(skb)) {
-               struct sock *sk = skb->sk;
-
-               if (refcount_inc_not_zero(&sk->sk_refcnt)) {
-                       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
-                       skb->destructor = sock_efree;
-               }
-       } else {
+       if (can_skb_orphan_partial(skb))
+               skb_set_owner_sk_safe(skb, skb->sk);
+       else
                skb_orphan(skb);
-       }
 }
 EXPORT_SYMBOL(skb_orphan_partial);
 
index 05354976c1fcfd34dd8c2142a54befa8770017bf..858276e72c6893111ee4ab5d161cb235b286bfc1 100644 (file)
@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
-               napi_direct &= !xdp_return_frame_no_direct();
+               if (napi_direct && xdp_return_frame_no_direct())
+                       napi_direct = false;
                page_pool_put_full_page(xa->page_pool, page, napi_direct);
                rcu_read_unlock();
                break;
index d142eb2b288b3d9a28934b5a10f43ae5c363d961..3c3e56a1f34d1fbf45b8fdb8b133b3a7c72cef9a 100644 (file)
@@ -795,8 +795,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
-               if (err)
+               if (err) {
+                       dsa_port_devlink_teardown(dp);
+                       dp->type = DSA_PORT_TYPE_UNUSED;
+                       err = dsa_port_devlink_setup(dp);
+                       if (err)
+                               goto teardown;
                        continue;
+               }
        }
 
        return 0;
index 4b5da89dc27a25cc37787646690c0b9dcee8fb80..32963276452f84499b8eb481bc1b3b1560c57a5a 100644 (file)
@@ -107,7 +107,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        bool unset_vlan_filtering = br_vlan_enabled(info->br);
        struct dsa_switch_tree *dst = ds->dst;
        struct netlink_ext_ack extack = {0};
-       int err, i;
+       int err, port;
 
        if (dst->index == info->tree_index && ds->index == info->sw_index &&
            ds->ops->port_bridge_join)
@@ -124,13 +124,16 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
-        * vlan_filtering callback is only when the last port left this bridge.
+        * vlan_filtering callback is only when the last port leaves the last
+        * VLAN-aware bridge.
         */
        if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
-               for (i = 0; i < ds->num_ports; i++) {
-                       if (i == info->port)
-                               continue;
-                       if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+               for (port = 0; port < ds->num_ports; port++) {
+                       struct net_device *bridge_dev;
+
+                       bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+
+                       if (bridge_dev && br_vlan_enabled(bridge_dev)) {
                                unset_vlan_filtering = false;
                                break;
                        }
index d99e1be94019d06553243ff654c6d99c00de9397..36ed85bf2ad51cfd80bf374a54aa7e0815872997 100644 (file)
@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
index a3271ec3e1627fb4f6e29da0e0fb1a638fe7e789..4b834bbf95e074d215d32bc6354fe83e2db2dc34 100644 (file)
@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 601f5fbfc63fbecf115ee31948ea3110af37bc40..33687cf58286b7cf63958b6e0eb004524084e0f2 100644 (file)
@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp_output_tail(x, skb, &esp);
        if (err)
index eb207089ece0b29bbb96dd66544be79133be2ecc..31c6c6d99d5ecf6ff1752c450c46981f8eac7da0 100644 (file)
@@ -218,7 +218,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
@@ -238,6 +238,8 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        if (skb->len > mtu) {
                skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                } else {
@@ -251,7 +253,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 4a0478b17243aca6eff4a783132546f37c08d524..99d743eb9dc4688c23198c0663a25776b086c2d1 100644 (file)
@@ -2754,6 +2754,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                val = up->gso_size;
                break;
 
+       case UDP_GRO:
+               val = up->gro_enabled;
+               break;
+
        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
index 440080da805b5ead265b8ae3e018719b1048a2ae..080ee7f44c649151f7ec7b788a8ced07272823f4 100644 (file)
@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
index 153ad103ba74eb0fb61d09bf08bca07feba33ae2..727d791ed5e67a6f9117a7d31d1bcc84b7db663a 100644 (file)
@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 1ca516fb30e1c29f29c94e90351587ab33b43323..4af56affaafd436fbd35ade87ffd2b7c8e6d4d91 100644 (file)
@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        skb->encap_hdr_csum = 1;
 
        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 
        ipv6_hdr(skb)->payload_len = htons(len);
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp6_output_tail(x, skb, &esp);
        if (err)
index f10e7a72ea6248e5ec1fbdb8b4e1c4e0e874cb96..e0cc32e45880117307e921afebedd5817e2d52ea 100644 (file)
@@ -494,7 +494,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        x = dst->xfrm;
        if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
@@ -523,6 +523,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -531,7 +533,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 1590b9d4cde28f0e6acdd04d7bf2a09ad6a0da8a..4bde960e19dc00fb379ef694484f36cefc94fa6a 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/atomic.h>
-#include <linux/igmp.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
@@ -20,7 +19,6 @@
 #include <net/tcp_states.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/transp_v6.h>
-#include <net/addrconf.h>
 #endif
 #include <net/mptcp.h>
 #include <net/xfrm.h>
@@ -2878,6 +2876,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
        return ret;
 }
 
+static bool mptcp_unsupported(int level, int optname)
+{
+       if (level == SOL_IP) {
+               switch (optname) {
+               case IP_ADD_MEMBERSHIP:
+               case IP_ADD_SOURCE_MEMBERSHIP:
+               case IP_DROP_MEMBERSHIP:
+               case IP_DROP_SOURCE_MEMBERSHIP:
+               case IP_BLOCK_SOURCE:
+               case IP_UNBLOCK_SOURCE:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       if (level == SOL_IPV6) {
+               switch (optname) {
+               case IPV6_ADDRFORM:
+               case IPV6_ADD_MEMBERSHIP:
+               case IPV6_DROP_MEMBERSHIP:
+               case IPV6_JOIN_ANYCAST:
+               case IPV6_LEAVE_ANYCAST:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       return false;
+}
+
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
 {
@@ -2886,6 +2926,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
 
        pr_debug("msk=%p", msk);
 
+       if (mptcp_unsupported(level, optname))
+               return -ENOPROTOOPT;
+
        if (level == SOL_SOCKET)
                return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
 
@@ -3419,34 +3462,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct sock *sk = sock->sk;
-       struct mptcp_sock *msk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-       }
-
-       release_sock(sk);
-
-       return inet_release(sock);
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
-       .release           = mptcp_release,
+       .release           = inet_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
@@ -3538,35 +3557,10 @@ void __init mptcp_proto_init(void)
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp6_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct mptcp_sock *msk;
-       struct sock *sk = sock->sk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-               ipv6_sock_mc_close(ssk);
-               ipv6_sock_ac_close(ssk);
-       }
-
-       release_sock(sk);
-       return inet6_release(sock);
-}
-
 static const struct proto_ops mptcp_v6_stream_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
-       .release           = mptcp6_release,
+       .release           = inet6_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
index a9cb355324d1ab5a4d33870c13f2a178ec793a81..ffff8da707b8c1c9d8f771072ce39f906cb67a69 100644 (file)
@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained) {
-               ncsi_stop_channel_monitor(nc);
-               return;
-       }
+       if (!enabled)
+               return;         /* expected race disabling timer */
+       if (WARN_ON_ONCE(chained))
+               goto bad_state;
+
        if (state != NCSI_CHANNEL_INACTIVE &&
            state != NCSI_CHANNEL_ACTIVE) {
-               ncsi_stop_channel_monitor(nc);
+bad_state:
+               netdev_warn(ndp->ndev.dev,
+                           "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+                           nc->id, state, chained ? "on" : "off");
+               spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
+               spin_unlock_irqrestore(&nc->lock, flags);
                return;
        }
 
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
                ncsi_report_link(ndp, true);
                ndp->flags |= NCSI_DEV_RESHUFFLE;
 
-               ncsi_stop_channel_monitor(nc);
-
                ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
                nc->state = NCSI_CHANNEL_INVISIBLE;
                ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
index d257ed3b732ae356441b7ea205c79817fd2daad6..a3b46f8888033e191e133eedd0f3ea65df82bd33 100644 (file)
@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
        if (!llcp_sock->service_name) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
                ret = -EISCONN;
                goto error;
        }
+       if (sk->sk_state == LLCP_CONNECTING) {
+               ret = -EINPROGRESS;
+               goto error;
+       }
 
        dev = nfc_get_device(addr->dev_idx);
        if (dev == NULL) {
@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->local = nfc_llcp_local_get(local);
        llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 sock_unlink:
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+       kfree(llcp_sock->service_name);
+       llcp_sock->service_name = NULL;
 
 sock_llcp_release:
        nfc_llcp_put_ssap(local, llcp_sock->ssap);
+       nfc_llcp_local_put(llcp_sock->local);
 
 put_dev:
        nfc_put_device(dev);
index dfc820ee553a0948cc64f25f5b8f9c5d0061cfd4..1e4fb568fa841d10e82564291c02e0e2d84c9ac4 100644 (file)
@@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (flow) {
                        init_waitqueue_head(&flow->resume_tx);
-                       radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+                       if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+                               kfree(flow);
+                               flow = NULL;
+                       }
                }
        }
        mutex_unlock(&node->qrtr_tx_lock);
index 071a261fdaabbfbefe2ddc007c162f61e70a1d96..799034e0f513d988334280186cbdf255fbf50eb7 100644 (file)
@@ -347,8 +347,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (IS_ERR(rm->data.op_sg)) {
+               void *err = ERR_CAST(rm->data.op_sg);
                rds_message_put(rm);
-               return ERR_CAST(rm->data.op_sg);
+               return err;
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index b919826939e0bd4e3b5fc986f024b3e1f0f5e4c6..43cceb924976fbf644bdcb962b3175c199110e1b 100644 (file)
@@ -1042,6 +1042,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
 
+       if (!bind && ovr && err == ACT_P_CREATED)
+               refcount_set(&a->tcfa_refcnt, 2);
+
        return a;
 
 err_out:
index 62e12cb41a3e1c0b2403552eb05128e4d3ac50b6..081c11d5717c4a7ad2d76a45f1fd45c47100a5bf 100644 (file)
@@ -1675,9 +1675,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q)
+                       if (new_q) {
                                htb_set_lockdep_class_child(new_q);
-                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                       }
                }
        }
 
index cebcc104dc70ae6313278e2958e51a04909b956f..022999e0202d71d46b39b6d4785a4aca6a48731a 100644 (file)
@@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
-                       kfree_skb(__skb_dequeue(arrvq));
+                       __skb_dequeue(arrvq);
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
index d8e8a11ca845e3603680c61aa0dc3a9b2ecfefe2..a20aec9d73933a5df26c36581c0793f2fd328624 100644 (file)
@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
        case XFRM_MSG_GETSADINFO:
        case XFRM_MSG_GETSPDINFO:
        default:
-               WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
 
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
                return xfrm_nla_cpy(dst, src, nla_len(src));
        default:
                BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
-               WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+               pr_warn_once("unsupported nla_type %d\n", src->nla_type);
                return -EOPNOTSUPP;
        }
 }
@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
        struct sk_buff *new = NULL;
        int err;
 
-       if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+       if (type >= ARRAY_SIZE(xfrm_msg_min)) {
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return -EOPNOTSUPP;
+       }
 
        if (skb_shinfo(skb)->frag_list == NULL) {
                new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
        struct nlmsghdr *nlmsg = dst;
        struct nlattr *nla;
 
+       /* xfrm_user_rcv_msg_compat() relies on fact that 32-bit messages
+        * have the same len or shorted than 64-bit ones.
+        * 32-bit translation that is bigger than 64-bit original is unexpected.
+        */
        if (WARN_ON_ONCE(copy_len > payload))
                copy_len = payload;
 
index edf11893dbe81ffcc026a18b8b66f19355c6ff39..6d6917b68856fc7f544961d018a23a2c13bc63d9 100644 (file)
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       xo->flags |= XFRM_XMIT;
-
        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;
 
index 495b1f5c979bc31bacdbca3aae4c50f4f7f21541..8831f5a9e99233c8b1b19bd20705c5174f88530b 100644 (file)
@@ -306,6 +306,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -314,6 +316,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                return -EMSGSIZE;
        }
 
+xmit:
        xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = tdev;
index a7ab19353313ce1d8b6ce6a4e6f6c1821c8982d0..e4cb0ff4dcf413227676d9adf62c9ac2898d5e9a 100644 (file)
@@ -503,22 +503,22 @@ out:
        return err;
 }
 
-int xfrm_output_resume(struct sk_buff *skb, int err)
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
 {
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);
 
-               err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
+               err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;
 
                if (!skb_dst(skb)->xfrm)
-                       return dst_output(net, skb->sk, skb);
+                       return dst_output(net, sk, skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, net, skb->sk, skb,
+                             NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return xfrm_output_resume(skb, 1);
+       return xfrm_output_resume(sk, skb, 1);
 }
 
 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ip_is_fragment(ip_hdr(skb))) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;
@@ -705,8 +711,15 @@ out:
 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int ptr = 0;
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;
index d01ca1a184189bfc00d111cfda1443baee571bda..4496f7efa220017b1c8d8a747e6be3aef65f07b3 100644 (file)
@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
  */
 
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
 static struct kmem_cache *xfrm_state_cache __ro_after_init;
 
 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        }
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
-       write_seqcount_begin(&xfrm_state_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
        net->xfrm.state_hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_state_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
        osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        to_put = NULL;
 
-       sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+       sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -1176,7 +1175,7 @@ out:
        if (to_put)
                xfrm_state_put(to_put);
 
-       if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+       if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
                *err = -EAGAIN;
                if (x) {
                        xfrm_state_put(x);
@@ -2666,6 +2665,8 @@ int __net_init xfrm_state_init(struct net *net)
        net->xfrm.state_num = 0;
        INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
        spin_lock_init(&net->xfrm.xfrm_state_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
+                              &net->xfrm.xfrm_state_lock);
        return 0;
 
 out_byspi:
index 0ccb1dda099ae9558d7cb8b2579092f1128f0a5d..eb307ca37bfa69df1acd7cccad1435fed0cf166e 100755 (executable)
@@ -657,10 +657,21 @@ test_ecn_decap()
 {
        # In accordance with INET_ECN_decapsulate()
        __test_ecn_decap 00 00 0x00
+       __test_ecn_decap 00 01 0x00
+       __test_ecn_decap 00 02 0x00
+       # 00 03 is tested in test_ecn_decap_error()
+       __test_ecn_decap 01 00 0x01
        __test_ecn_decap 01 01 0x01
-       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 01 02 0x01
        __test_ecn_decap 01 03 0x03
+       __test_ecn_decap 02 00 0x02
+       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 02 02 0x02
        __test_ecn_decap 02 03 0x03
+       __test_ecn_decap 03 00 0x03
+       __test_ecn_decap 03 01 0x03
+       __test_ecn_decap 03 02 0x03
+       __test_ecn_decap 03 03 0x03
        test_ecn_decap_error
 }