Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 22 Jul 2019 16:30:34 +0000 (09:30 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 22 Jul 2019 16:30:34 +0000 (09:30 -0700)
Pull preemption Kconfig fix from Thomas Gleixner:
 "The PREEMPT_RT stub config renamed PREEMPT to PREEMPT_LL and defined
  PREEMPT outside of the menu and made it selectable by both PREEMPT_LL
  and PREEMPT_RT.

  Stupid me missed that 114 defconfigs select CONFIG_PREEMPT which
  obviously can't work anymore. oldconfig builds are affected as well,
  but it's more obvious as the user gets asked. [old]defconfig silently
  fixes it up and selects PREEMPT_NONE.

  Unbreak it by undoing the rename and adding an intermediate config
  symbol which is selected by both PREEMPT and PREEMPT_RT. That requires
  to chase down a few #ifdefs, but it's better than tweaking 114
  defconfigs and annoying users"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt, Kconfig: Unbreak def/oldconfig with CONFIG_PREEMPT=y

98 files changed:
drivers/connector/connector.c
drivers/media/v4l2-core/v4l2-subdev.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/chelsio/cxgb/my3126.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mscc/ocelot_flower.c
drivers/net/ethernet/mscc/ocelot_tc.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/sfp.c
drivers/net/vrf.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
drivers/net/wireless/ti/wlcore/vendor_cmd.c
fs/btrfs/Kconfig
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/volumes.c
include/linux/connector.h
include/linux/netfilter/nf_conntrack_h323_asn1.h
include/net/cfg80211.h
include/net/flow_offload.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/netfilter/nf_tables.h
include/net/pkt_cls.h
include/net/sch_generic.h
include/net/tcp.h
include/uapi/linux/nl80211.h
include/uapi/linux/videodev2.h
kernel/exit.c
net/bridge/netfilter/Kconfig
net/core/flow_offload.c
net/dsa/slave.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/tcp_output.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/mac80211/cfg.c
net/mac80211/driver-ops.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/nf_conntrack_amanda.c
net/netfilter/nf_conntrack_broadcast.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_tftp.c
net/netfilter/nf_nat_amanda.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_ftp.c
net/netfilter/nf_nat_irc.c
net/netfilter/nf_nat_sip.c
net/netfilter/nf_nat_tftp.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_chain_filter.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_meta.c
net/netfilter/nft_redir.c
net/netfilter/nft_synproxy.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_table.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/cls_u32.c
net/tipc/topsrv.c

index 23553ed..2d22d6b 100644 (file)
@@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static struct cn_dev cdev = {
-       .input   = cn_rx_skb,
-};
-
 static int cn_init(void)
 {
        struct cn_dev *dev = &cdev;
        struct netlink_kernel_cfg cfg = {
                .groups = CN_NETLINK_USERS + 0xf,
-               .input  = dev->input,
+               .input  = cn_rx_skb,
        };
 
        dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
index 21fb90d..25c73c1 100644 (file)
@@ -124,7 +124,7 @@ static inline int check_which(__u32 which)
 static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
 {
 #if defined(CONFIG_MEDIA_CONTROLLER)
-       if (sd->entity.graph_obj.mdev) {
+       if (sd->entity.num_pads) {
                if (pad >= sd->entity.num_pads)
                        return -EINVAL;
                return 0;
index 656ed80..e2be5a6 100644 (file)
@@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        sw_cons = txdata->tx_pkt_cons;
 
+       /* Ensure subsequent loads occur after hw_cons */
+       smp_rmb();
+
        while (sw_cons != hw_cons) {
                u16 pkt_cons;
 
index 20c09cc..60aa45b 100644 (file)
@@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy)
        return cphy_cause_link_change;
 }
 
-static void my3216_poll(struct work_struct *work)
+static void my3126_poll(struct work_struct *work)
 {
        struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
 
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev,
                return NULL;
 
        cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
-       INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
+       INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
        cphy->bmsr = 0;
 
        return cphy;
index 67202b6..4311ad9 100644 (file)
@@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
                char name[IFNAMSIZ];
                u32 devcap2;
                u16 flags;
-               int pos;
 
                /* If we want to instantiate Virtual Functions, then our
                 * parent bridge's PCI-E needs to support Alternative Routing
@@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
                 * and above.
                 */
                pbridge = pdev->bus->self;
-               pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
-               pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
-               pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+               pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
+               pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
 
                if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
                    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
index 312599c..e447976 100644 (file)
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
-       spin_lock_init(&new->lock);
+       if (new)
+               spin_lock_init(&new->lock);
        return new;
 }
 
index 9dd5ed9..f7fc553 100644 (file)
@@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
        } else {
                unsigned int pack_align;
                unsigned int ingpad, ingpack;
-               unsigned int pcie_cap;
 
                /* T5 introduced the separation of the Free List Padding and
                 * Packing Boundaries.  Thus, we can select a smaller Padding
@@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                 * multiple of the Maximum Payload Size.
                 */
                pack_align = fl_align;
-               pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
-               if (pcie_cap) {
+               if (pci_is_pcie(adap->pdev)) {
                        unsigned int mps, mps_log;
                        u16 devctl;
 
@@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                         * [bits 7:5] encodes sizes as powers of 2 starting at
                         * 128 bytes.
                         */
-                       pci_read_config_word(adap->pdev,
-                                            pcie_cap + PCI_EXP_DEVCTL,
-                                            &devctl);
+                       pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
+                                                 &devctl);
                        mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
                        mps = 1 << mps_log;
                        if (mps > pack_align)
index b7a246b..2edb86e 100644 (file)
@@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter)
        int status;
 
        if (netif_running(netdev)) {
+               /* be_tx_timeout() must not run concurrently with this
+                * function, synchronize with an already-running dev_watchdog
+                */
+               netif_tx_lock_bh(netdev);
                /* device cannot transmit now, avoid dev_watchdog timeouts */
                netif_carrier_off(netdev);
+               netif_tx_unlock_bh(netdev);
 
                be_close(netdev);
        }
index 8ad5292..75329ab 100644 (file)
@@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_GET_QID_IN_PF,        /* (VF -> PF) get queue id in pf */
        HCLGE_MBX_LINK_STAT_MODE,       /* (PF -> VF) link mode has changed */
        HCLGE_MBX_GET_LINK_MODE,        /* (VF -> PF) get the link mode of pf */
-       HLCGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
+       HCLGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
        HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
 
        HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
index a38ac7c..690b999 100644 (file)
@@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
        memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
 
        return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
-                                 HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+                                 HCLGE_MBX_PUSH_VLAN_INFO, vfid);
 }
 
 static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
index f60b80b..6a96987 100644 (file)
@@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                case HCLGE_MBX_LINK_STAT_CHANGE:
                case HCLGE_MBX_ASSERTING_RESET:
                case HCLGE_MBX_LINK_STAT_MODE:
-               case HLCGE_MBX_PUSH_VLAN_INFO:
+               case HCLGE_MBX_PUSH_VLAN_INFO:
                        /* set this mbx event as pending. This is required as we
                         * might loose interrupt event when mbx task is busy
                         * handling. This shall be cleared when mbx task just
@@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        hclgevf_reset_task_schedule(hdev);
 
                        break;
-               case HLCGE_MBX_PUSH_VLAN_INFO:
+               case HCLGE_MBX_PUSH_VLAN_INFO:
                        state = le16_to_cpu(msg_q[1]);
                        vlan_info = &msg_q[1];
                        hclgevf_update_port_base_vlan_info(hdev, state,
index 93f3b4e..aa9323e 100644 (file)
@@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
 {
        struct igc_adapter *adapter = hw->back;
-       u16 cap_offset;
 
-       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
-       if (!cap_offset)
+       if (!pci_is_pcie(adapter->pdev))
                return -IGC_ERR_CONFIG;
 
-       pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+       pcie_capability_read_word(adapter->pdev, reg, value);
 
        return IGC_SUCCESS;
 }
@@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
 {
        struct igc_adapter *adapter = hw->back;
-       u16 cap_offset;
 
-       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
-       if (!cap_offset)
+       if (!pci_is_pcie(adapter->pdev))
                return -IGC_ERR_CONFIG;
 
-       pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+       pcie_capability_write_word(adapter->pdev, reg, *value);
 
        return IGC_SUCCESS;
 }
index 7245d28..7f747cb 100644 (file)
@@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-               block_cb = flow_block_cb_alloc(f->net,
-                                              mlx5e_rep_indr_setup_block_cb,
+               block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
                                               indr_priv, indr_priv,
                                               mlx5e_rep_indr_tc_block_unbind);
                if (IS_ERR(block_cb)) {
@@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                if (!indr_priv)
                        return -ENOENT;
 
-               block_cb = flow_block_cb_lookup(f,
+               block_cb = flow_block_cb_lookup(f->block,
                                                mlx5e_rep_indr_setup_block_cb,
                                                indr_priv);
                if (!block_cb)
index 4d34d42..6506381 100644 (file)
@@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
        bool register_block = false;
        int err;
 
-       block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+       block_cb = flow_block_cb_lookup(f->block,
+                                       mlxsw_sp_setup_tc_block_cb_flower,
                                        mlxsw_sp);
        if (!block_cb) {
                acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
                if (!acl_block)
                        return -ENOMEM;
-               block_cb = flow_block_cb_alloc(f->net,
-                                              mlxsw_sp_setup_tc_block_cb_flower,
+               block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
                                               mlxsw_sp, acl_block,
                                               mlxsw_sp_tc_block_flower_release);
                if (IS_ERR(block_cb)) {
@@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
        struct flow_block_cb *block_cb;
        int err;
 
-       block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+       block_cb = flow_block_cb_lookup(f->block,
+                                       mlxsw_sp_setup_tc_block_cb_flower,
                                        mlxsw_sp);
        if (!block_cb)
                return;
@@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct flow_block_offload *f)
 {
        struct flow_block_cb *block_cb;
-       tc_setup_cb_t *cb;
+       flow_setup_cb_t *cb;
        bool ingress;
        int err;
 
@@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                          &mlxsw_sp_block_cb_list))
                        return -EBUSY;
 
-               block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
+               block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
                                               mlxsw_sp_port, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
@@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
        case FLOW_BLOCK_UNBIND:
                mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
                                                      f, ingress);
-               block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
+               block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
                if (!block_cb)
                        return -ENOENT;
 
index 7aaddc0..59487d4 100644 (file)
@@ -316,15 +316,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
        if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                return -EOPNOTSUPP;
 
-       block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
-                                       port);
+       block_cb = flow_block_cb_lookup(f->block,
+                                       ocelot_setup_tc_block_cb_flower, port);
        if (!block_cb) {
                port_block = ocelot_port_block_create(port);
                if (!port_block)
                        return -ENOMEM;
 
-               block_cb = flow_block_cb_alloc(f->net,
-                                              ocelot_setup_tc_block_cb_flower,
+               block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
                                               port, port_block,
                                               ocelot_tc_block_unbind);
                if (IS_ERR(block_cb)) {
@@ -351,8 +350,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
 {
        struct flow_block_cb *block_cb;
 
-       block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
-                                       port);
+       block_cb = flow_block_cb_lookup(f->block,
+                                       ocelot_setup_tc_block_cb_flower, port);
        if (!block_cb)
                return;
 
index 9e6464f..16a6db7 100644 (file)
@@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
                                 struct flow_block_offload *f)
 {
        struct flow_block_cb *block_cb;
-       tc_setup_cb_t *cb;
+       flow_setup_cb_t *cb;
        int err;
 
        netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
@@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
                if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
                        return -EBUSY;
 
-               block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
+               block_cb = flow_block_cb_alloc(cb, port, port, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
 
@@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
                list_add_tail(&block_cb->driver_list, f->driver_block_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
-               block_cb = flow_block_cb_lookup(f, cb, port);
+               block_cb = flow_block_cb_lookup(f->block, cb, port);
                if (!block_cb)
                        return -ENOENT;
 
index faa8ba0..e209f15 100644 (file)
@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                          &nfp_block_cb_list))
                        return -EBUSY;
 
-               block_cb = flow_block_cb_alloc(f->net,
-                                              nfp_flower_setup_tc_block_cb,
+               block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
                                               repr, repr, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
@@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
-               block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+               block_cb = flow_block_cb_lookup(f->block,
+                                               nfp_flower_setup_tc_block_cb,
                                                repr);
                if (!block_cb)
                        return -ENOENT;
@@ -1424,8 +1424,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-               block_cb = flow_block_cb_alloc(f->net,
-                                              nfp_flower_setup_indr_block_cb,
+               block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
                                               cb_priv, cb_priv,
                                               nfp_flower_setup_indr_tc_release);
                if (IS_ERR(block_cb)) {
@@ -1442,7 +1441,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                if (!cb_priv)
                        return -ENOENT;
 
-               block_cb = flow_block_cb_lookup(f,
+               block_cb = flow_block_cb_lookup(f->block,
                                                nfp_flower_setup_indr_block_cb,
                                                cb_priv);
                if (!block_cb)
index f900fde..17c64e4 100644 (file)
@@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
 
        /* Check atomic operations support in PCI configuration space. */
-       pci_read_config_dword(cdev->pdev,
-                             cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
-                             &pci_status_control);
+       pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
+                                  &pci_status_control);
 
        if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
                SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
index 0637c67..6272115 100644 (file)
@@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
 
        ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
        if (ret & BIT(8))
-               phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1));
+               phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
        else
-               phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0);
+               phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
 
        /* Enable PHY auto speed down */
        phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
index afdcc56..3544e19 100644 (file)
@@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net,
 
        if (unlikely(!skb)) {
                ++net_device_ctx->eth_stats.rx_no_memory;
-               rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }
 
index 2d816aa..e36c04c 100644 (file)
@@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
 
 static void sfp_hwmon_to_rx_power(long *value)
 {
-       *value = DIV_ROUND_CLOSEST(*value, 100);
+       *value = DIV_ROUND_CLOSEST(*value, 10);
 }
 
 static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
index 54edf89..6e84328 100644 (file)
@@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
 {
-       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       const struct ipv6hdr *iph;
        struct net *net = dev_net(skb->dev);
-       struct flowi6 fl6 = {
-               /* needed to match OIF rule */
-               .flowi6_oif = dev->ifindex,
-               .flowi6_iif = LOOPBACK_IFINDEX,
-               .daddr = iph->daddr,
-               .saddr = iph->saddr,
-               .flowlabel = ip6_flowinfo(iph),
-               .flowi6_mark = skb->mark,
-               .flowi6_proto = iph->nexthdr,
-               .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
-       };
+       struct flowi6 fl6;
        int ret = NET_XMIT_DROP;
        struct dst_entry *dst;
        struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
 
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+               goto err;
+
+       iph = ipv6_hdr(skb);
+
+       memset(&fl6, 0, sizeof(fl6));
+       /* needed to match OIF rule */
+       fl6.flowi6_oif = dev->ifindex;
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
+       fl6.daddr = iph->daddr;
+       fl6.saddr = iph->saddr;
+       fl6.flowlabel = ip6_flowinfo(iph);
+       fl6.flowi6_mark = skb->mark;
+       fl6.flowi6_proto = iph->nexthdr;
+       fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst == dst_null)
                goto err;
@@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
 static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                           struct net_device *vrf_dev)
 {
-       struct iphdr *ip4h = ip_hdr(skb);
+       struct iphdr *ip4h;
        int ret = NET_XMIT_DROP;
-       struct flowi4 fl4 = {
-               /* needed to match OIF rule */
-               .flowi4_oif = vrf_dev->ifindex,
-               .flowi4_iif = LOOPBACK_IFINDEX,
-               .flowi4_tos = RT_TOS(ip4h->tos),
-               .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
-               .flowi4_proto = ip4h->protocol,
-               .daddr = ip4h->daddr,
-               .saddr = ip4h->saddr,
-       };
+       struct flowi4 fl4;
        struct net *net = dev_net(vrf_dev);
        struct rtable *rt;
 
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+               goto err;
+
+       ip4h = ip_hdr(skb);
+
+       memset(&fl4, 0, sizeof(fl4));
+       /* needed to match OIF rule */
+       fl4.flowi4_oif = vrf_dev->ifindex;
+       fl4.flowi4_iif = LOOPBACK_IFINDEX;
+       fl4.flowi4_tos = RT_TOS(ip4h->tos);
+       fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+       fl4.flowi4_proto = ip4h->protocol;
+       fl4.daddr = ip4h->daddr;
+       fl4.saddr = ip4h->saddr;
+
        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;
index d436cc5..2fb4258 100644 (file)
@@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
                .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
+               .policy = wil_rf_sector_policy,
                .doit = wil_rf_sector_get_cfg
        },
        {
@@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
                .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
+               .policy = wil_rf_sector_policy,
                .doit = wil_rf_sector_set_cfg
        },
        {
@@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
                        QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
+               .policy = wil_rf_sector_policy,
                .doit = wil_rf_sector_get_selected
        },
        {
@@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
                        QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
+               .policy = wil_rf_sector_policy,
                .doit = wil_rf_sector_set_selected
        },
 };
index f650089..d07e7c7 100644 (file)
@@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
                },
                .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
                         WIPHY_VENDOR_CMD_NEED_NETDEV,
+               .policy = VENDOR_CMD_RAW_DATA,
                .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
        },
 };
index 5cf0b32..e1bd344 100644 (file)
@@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
                .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
                .doit = wlcore_vendor_cmd_smart_config_start,
+               .policy = wlcore_vendor_attr_policy,
        },
        {
                .info = {
@@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
                .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
                .doit = wlcore_vendor_cmd_smart_config_stop,
+               .policy = wlcore_vendor_attr_policy,
        },
        {
                .info = {
@@ -181,6 +183,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
                .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
                         WIPHY_VENDOR_CMD_NEED_RUNNING,
                .doit = wlcore_vendor_cmd_smart_config_set_group_key,
+               .policy = wlcore_vendor_attr_policy,
        },
 };
 
index 212b4a8..38651fa 100644 (file)
@@ -4,6 +4,7 @@ config BTRFS_FS
        tristate "Btrfs filesystem support"
        select CRYPTO
        select CRYPTO_CRC32C
+       select LIBCRC32C
        select ZLIB_INFLATE
        select ZLIB_DEFLATE
        select LZO_COMPRESS
index 41a2bd2..5f7ee70 100644 (file)
@@ -4106,6 +4106,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
        percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 
+       btrfs_free_csum_hash(fs_info);
        btrfs_free_stripe_hash_table(fs_info);
        btrfs_free_ref_cache(fs_info);
 }
index 1af069a..ee582a3 100644 (file)
@@ -395,10 +395,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
        return 0;
 }
 
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+           BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+               return false;
+       return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
+       if (!inode_can_compress(inode)) {
+               WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+                       KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+                       btrfs_ino(BTRFS_I(inode)));
+               return 0;
+       }
        /* force compress */
        if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
                return 1;
@@ -1631,7 +1652,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
        } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
-       } else if (!inode_need_compress(inode, start, end)) {
+       } else if (!inode_can_compress(inode) ||
+                  !inode_need_compress(inode, start, end)) {
                ret = cow_file_range(inode, locked_page, start, end, end,
                                      page_started, nr_written, 1, NULL);
        } else {
index a13ddba..d74b74c 100644 (file)
@@ -5941,6 +5941,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
        u64 stripe_len;
        u64 raid56_full_stripe_start = (u64)-1;
        int data_stripes;
+       int ret = 0;
 
        ASSERT(op != BTRFS_MAP_DISCARD);
 
@@ -5961,8 +5962,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                btrfs_crit(fs_info,
 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
                        stripe_offset, offset, em->start, logical, stripe_len);
-               free_extent_map(em);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        /* stripe_offset is the offset of this block in its stripe */
@@ -6009,7 +6010,10 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
        io_geom->stripe_offset = stripe_offset;
        io_geom->raid56_stripe_offset = raid56_full_stripe_start;
 
-       return 0;
+out:
+       /* once for us */
+       free_extent_map(em);
+       return ret;
 }
 
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
index 6b6c739..cb73264 100644 (file)
@@ -50,7 +50,6 @@ struct cn_dev {
 
        u32 seq, groups;
        struct sock *nls;
-       void (*input) (struct sk_buff *skb);
 
        struct cn_queue_dev *cbdev;
 };
index 91d6275..19df783 100644 (file)
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /****************************************************************************
- * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323
- *                           conntrack/NAT module.
+ * BER and PER decoding library for H.323 conntrack/NAT module.
  *
  * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  *
index 88c2715..45850a8 100644 (file)
@@ -4170,7 +4170,7 @@ struct sta_opmode_info {
        u8 rx_nss;
 };
 
-#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)ERR_PTR(-ENODATA))
+#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA))
 
 /**
  * struct wiphy_vendor_command - vendor command definition
index db33729..b16d216 100644 (file)
@@ -2,8 +2,8 @@
 #define _NET_FLOW_OFFLOAD_H
 
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <net/flow_dissector.h>
-#include <net/sch_generic.h>
 
 struct flow_match {
        struct flow_dissector   *dissector;
@@ -249,6 +249,10 @@ enum flow_block_binder_type {
        FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
 };
 
+struct flow_block {
+       struct list_head cb_list;
+};
+
 struct netlink_ext_ack;
 
 struct flow_block_offload {
@@ -256,29 +260,33 @@ struct flow_block_offload {
        enum flow_block_binder_type binder_type;
        bool block_shared;
        struct net *net;
+       struct flow_block *block;
        struct list_head cb_list;
        struct list_head *driver_block_list;
        struct netlink_ext_ack *extack;
 };
 
+enum tc_setup_type;
+typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
+                           void *cb_priv);
+
 struct flow_block_cb {
        struct list_head        driver_list;
        struct list_head        list;
-       struct net              *net;
-       tc_setup_cb_t           *cb;
+       flow_setup_cb_t         *cb;
        void                    *cb_ident;
        void                    *cb_priv;
        void                    (*release)(void *cb_priv);
        unsigned int            refcnt;
 };
 
-struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
+struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
 
-struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload,
-                                          tc_setup_cb_t *cb, void *cb_ident);
+struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
+                                          flow_setup_cb_t *cb, void *cb_ident);
 
 void *flow_block_cb_priv(struct flow_block_cb *block_cb);
 void flow_block_cb_incref(struct flow_block_cb *block_cb);
@@ -296,11 +304,12 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
        list_move(&block_cb->list, &offload->cb_list);
 }
 
-bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);
 
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
-                              struct list_head *driver_list, tc_setup_cb_t *cb,
+                              struct list_head *driver_list,
+                              flow_setup_cb_t *cb,
                               void *cb_ident, void *cb_priv, bool ingress_only);
 
 enum flow_cls_command {
@@ -333,4 +342,9 @@ flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
        return flow_cmd->rule;
 }
 
+static inline void flow_block_init(struct flow_block *flow_block)
+{
+       INIT_LIST_HEAD(&flow_block->cb_list);
+}
+
 #endif /* _NET_FLOW_OFFLOAD_H */
index 93ce6b0..573429b 100644 (file)
@@ -76,6 +76,11 @@ struct nf_conntrack_expect_policy {
 #define NF_CT_EXPECT_CLASS_DEFAULT     0
 #define NF_CT_EXPECT_MAX_CNT           255
 
+/* Allow to reuse expectations with the same tuples from different master
+ * conntracks.
+ */
+#define NF_CT_EXP_F_SKIP_MASTER        0x1
+
 int nf_conntrack_expect_pernet_init(struct net *net);
 void nf_conntrack_expect_pernet_fini(struct net *net);
 
@@ -122,10 +127,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
                       u_int8_t, const __be16 *, const __be16 *);
 void nf_ct_expect_put(struct nf_conntrack_expect *exp);
 int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 
-                               u32 portid, int report);
-static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect)
+                               u32 portid, int report, unsigned int flags);
+static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect,
+                                      unsigned int flags)
 {
-       return nf_ct_expect_related_report(expect, 0, 0);
+       return nf_ct_expect_related_report(expect, 0, 0, flags);
 }
 
 #endif /*_NF_CONNTRACK_EXPECT_H*/
index 8f00125..44513b9 100644 (file)
@@ -68,6 +68,7 @@ struct synproxy_options {
        u8                              options;
        u8                              wscale;
        u16                             mss;
+       u16                             mss_encode;
        u32                             tsval;
        u32                             tsecr;
 };
index 35dfdd9..9b62456 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/rhashtable.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netlink.h>
+#include <net/flow_offload.h>
 
 struct module;
 
@@ -951,7 +952,7 @@ struct nft_stats {
  *     @stats: per-cpu chain stats
  *     @chain: the chain
  *     @dev_name: device name that this base chain is attached to (if any)
- *     @cb_list: list of flow block callbacks (for hardware offload)
+ *     @flow_block: flow block (for hardware offload)
  */
 struct nft_base_chain {
        struct nf_hook_ops              ops;
@@ -961,7 +962,7 @@ struct nft_base_chain {
        struct nft_stats __percpu       *stats;
        struct nft_chain                chain;
        char                            dev_name[IFNAMSIZ];
-       struct list_head                cb_list;
+       struct flow_block               flow_block;
 };
 
 static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
index 841faad..e429809 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include <net/act_api.h>
-#include <net/flow_offload.h>
 #include <net/net_namespace.h>
 
 /* TC action not accessible from user space */
@@ -126,14 +125,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 }
 
 static inline
-int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
+int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
                               void *cb_priv)
 {
        return 0;
 }
 
 static inline
-void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
+void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
                                  void *cb_priv)
 {
 }
index 855167b..6b6b012 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
+#include <net/flow_offload.h>
 
 struct Qdisc_ops;
 struct qdisc_walker;
@@ -22,9 +23,6 @@ struct tcf_walker;
 struct module;
 struct bpf_flow_keys;
 
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-                         void *type_data, void *cb_priv);
-
 typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
                                    enum tc_setup_type type, void *type_data);
 
@@ -313,7 +311,7 @@ struct tcf_proto_ops {
        void                    (*walk)(struct tcf_proto *tp,
                                        struct tcf_walker *arg, bool rtnl_held);
        int                     (*reoffload)(struct tcf_proto *tp, bool add,
-                                            tc_setup_cb_t *cb, void *cb_priv,
+                                            flow_setup_cb_t *cb, void *cb_priv,
                                             struct netlink_ext_ack *extack);
        void                    (*bind_class)(void *, u32, unsigned long);
        void *                  (*tmplt_create)(struct net *net,
@@ -401,7 +399,7 @@ struct tcf_block {
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
-       struct list_head cb_list;
+       struct flow_block flow_block;
        struct list_head owner_list;
        bool keep_dst;
        unsigned int offloadcnt; /* Number of oddloaded filters */
index f42d300..e5cf514 100644 (file)
@@ -1709,6 +1709,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
        return skb_rb_first(&sk->tcp_rtx_queue);
 }
 
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+       return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
 {
        return skb_peek(&sk->sk_write_queue);
index 75758ec..beb9a9d 100644 (file)
@@ -2863,7 +2863,7 @@ enum nl80211_attrs {
 #define NL80211_HT_CAPABILITY_LEN              26
 #define NL80211_VHT_CAPABILITY_LEN             12
 #define NL80211_HE_MIN_CAPABILITY_LEN           16
-#define NL80211_HE_MAX_CAPABILITY_LEN           51
+#define NL80211_HE_MAX_CAPABILITY_LEN           54
 #define NL80211_MAX_NR_CIPHER_SUITES           5
 #define NL80211_MAX_NR_AKM_SUITES              2
 
index 9d9705c..2427bc4 100644 (file)
@@ -518,7 +518,13 @@ struct v4l2_pix_format {
 #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16  rrrrgggg bbbbxxxx */
 #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16  aaaabbbb ggggrrrr */
 #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16  xxxxbbbb ggggrrrr */
-#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16  bbbbgggg rrrraaaa */
+
+/*
+ * Originally this had 'BA12' as fourcc, but this clashed with the older
+ * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
+ * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
+ */
+#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16  bbbbgggg rrrraaaa */
 #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16  bbbbgggg rrrrxxxx */
 #define V4L2_PIX_FMT_RGB555  v4l2_fourcc('R', 'G', 'B', 'O') /* 16  RGB-5-5-5     */
 #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16  ARGB-1-5-5-5  */
index a75b6a7..4436158 100644 (file)
@@ -720,6 +720,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);
 
+       tsk->exit_state = EXIT_ZOMBIE;
        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                                thread_group_empty(tsk) &&
index 154fa55..5040fe4 100644 (file)
@@ -6,7 +6,7 @@
 menuconfig NF_TABLES_BRIDGE
        depends on BRIDGE && NETFILTER && NF_TABLES
        select NETFILTER_FAMILY_BRIDGE
-       bool "Ethernet Bridge nf_tables support"
+       tristate "Ethernet Bridge nf_tables support"
 
 if NF_TABLES_BRIDGE
 
@@ -25,6 +25,8 @@ config NF_LOG_BRIDGE
        tristate "Bridge packet logging"
        select NF_LOG_COMMON
 
+endif # NF_TABLES_BRIDGE
+
 config NF_CONNTRACK_BRIDGE
        tristate "IPv4/IPV6 bridge connection tracking support"
        depends on NF_CONNTRACK
@@ -39,8 +41,6 @@ config NF_CONNTRACK_BRIDGE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-endif # NF_TABLES_BRIDGE
-
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
        depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
index 76f8db3..d63b970 100644 (file)
@@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
 }
 EXPORT_SYMBOL(flow_rule_match_enc_opts);
 
-struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
+struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv))
 {
@@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
        if (!block_cb)
                return ERR_PTR(-ENOMEM);
 
-       block_cb->net = net;
        block_cb->cb = cb;
        block_cb->cb_ident = cb_ident;
        block_cb->cb_priv = cb_priv;
@@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb)
 }
 EXPORT_SYMBOL(flow_block_cb_free);
 
-struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f,
-                                          tc_setup_cb_t *cb, void *cb_ident)
+struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
+                                          flow_setup_cb_t *cb, void *cb_ident)
 {
        struct flow_block_cb *block_cb;
 
-       list_for_each_entry(block_cb, f->driver_block_list, driver_list) {
-               if (block_cb->net == f->net &&
-                   block_cb->cb == cb &&
+       list_for_each_entry(block_cb, &block->cb_list, list) {
+               if (block_cb->cb == cb &&
                    block_cb->cb_ident == cb_ident)
                        return block_cb;
        }
@@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
 }
 EXPORT_SYMBOL(flow_block_cb_decref);
 
-bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list)
 {
        struct flow_block_cb *block_cb;
@@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy);
 
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_block_list,
-                              tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
+                              flow_setup_cb_t *cb,
+                              void *cb_ident, void *cb_priv,
                               bool ingress_only)
 {
        struct flow_block_cb *block_cb;
@@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
                if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
                        return -EBUSY;
 
-               block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
-                                              cb_priv, NULL);
+               block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
 
@@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
                list_add_tail(&block_cb->driver_list, driver_block_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
-               block_cb = flow_block_cb_lookup(f, cb, cb_ident);
+               block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
                if (!block_cb)
                        return -ENOENT;
 
index 614c38e..33f4117 100644 (file)
@@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
                                    struct flow_block_offload *f)
 {
        struct flow_block_cb *block_cb;
-       tc_setup_cb_t *cb;
+       flow_setup_cb_t *cb;
 
        if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                cb = dsa_slave_setup_tc_block_cb_ig;
@@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
                if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
                        return -EBUSY;
 
-               block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
+               block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
 
@@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
                list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
-               block_cb = flow_block_cb_lookup(f, cb, dev);
+               block_cb = flow_block_cb_lookup(f->block, cb, dev);
                if (!block_cb)
                        return -ENOENT;
 
index 4d6bf7a..6bdb1ab 100644 (file)
@@ -416,8 +416,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
             ctinfo == IP_CT_RELATED_REPLY))
                return XT_CONTINUE;
 
-       /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
-        * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
+       /* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO,
+        * TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here
         * on, which all have an ID field [relevant for hashing]. */
 
        hash = clusterip_hashfn(skb, cipinfo->config);
index 8e7f84e..0e70f3f 100644 (file)
@@ -36,6 +36,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
                        opts.options |= XT_SYNPROXY_OPT_ECN;
 
                opts.options &= info->options;
+               opts.mss_encode = opts.mss;
+               opts.mss = info->mss;
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy_init_timestamp_cookie(info, &opts);
                else
index 5903167..cc23f1c 100644 (file)
@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
        flow.flowi4_tos = RT_TOS(iph->tos);
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+       flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
        return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
 }
index 87b711f..3e2685c 100644 (file)
@@ -221,11 +221,11 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                int ret;
 
                rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
-               ret = nf_ct_expect_related(rtp_exp);
+               ret = nf_ct_expect_related(rtp_exp, 0);
                if (ret == 0) {
                        rtcp_exp->tuple.dst.u.udp.port =
                            htons(nated_port + 1);
-                       ret = nf_ct_expect_related(rtcp_exp);
+                       ret = nf_ct_expect_related(rtcp_exp, 0);
                        if (ret == 0)
                                break;
                        else if (ret == -EBUSY) {
@@ -296,7 +296,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(nated_port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
@@ -352,7 +352,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(nated_port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
@@ -444,7 +444,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(nated_port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
@@ -537,7 +537,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(nated_port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
index 4af1f5d..6e4afc4 100644 (file)
@@ -1288,6 +1288,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
+       long limit;
        int nlen;
        u8 flags;
 
@@ -1298,8 +1299,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
        if (nsize < 0)
                nsize = 0;
 
-       if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
-                    tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
+       /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+        * We need some allowance to not penalize applications setting small
+        * SO_SNDBUF values.
+        * Also allow first and last skb in retransmit queue to be split.
+        */
+       limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+       if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+                    tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
+                    skb != tcp_rtx_queue_head(sk) &&
+                    skb != tcp_rtx_queue_tail(sk))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
                return -ENOMEM;
        }
index e77ea1e..5cdb4a6 100644 (file)
@@ -36,6 +36,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                        opts.options |= XT_SYNPROXY_OPT_ECN;
 
                opts.options &= info->options;
+               opts.mss_encode = opts.mss;
+               opts.mss = info->mss;
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy_init_timestamp_cookie(info, &opts);
                else
index 6bcaf73..d800801 100644 (file)
@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
        if (rpfilter_addr_linklocal(&iph->saddr)) {
                lookup_flags |= RT6_LOOKUP_F_IFACE;
                fl6.flowi6_oif = dev->ifindex;
-       } else if ((flags & XT_RPFILTER_LOOSE) == 0)
+       /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
+       } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
+                 (flags & XT_RPFILTER_LOOSE) == 0)
                fl6.flowi6_oif = dev->ifindex;
 
        rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
                goto out;
        }
 
-       if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+       if (rt->rt6i_idev->dev == dev ||
+           l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
+           (flags & XT_RPFILTER_LOOSE))
                ret = true;
  out:
        ip6_rt_put(rt);
index 76cc9e9..4d45806 100644 (file)
@@ -936,8 +936,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
 
        err = ieee80211_set_probe_resp(sdata, params->probe_resp,
                                       params->probe_resp_len, csa);
-       if (err < 0)
+       if (err < 0) {
+               kfree(new);
                return err;
+       }
        if (err == 0)
                changed |= BSS_CHANGED_AP_PROBE_RESP;
 
@@ -949,8 +951,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
                                                         params->civicloc,
                                                         params->civicloc_len);
 
-               if (err < 0)
+               if (err < 0) {
+                       kfree(new);
                        return err;
+               }
 
                changed |= BSS_CHANGED_FTM_RESPONDER;
        }
index acd4afb..c9a8a24 100644 (file)
@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
        if (!check_sdata_in_driver(sdata))
                return -EIO;
 
-       if (WARN_ONCE(params->cw_min == 0 ||
-                     params->cw_min > params->cw_max,
-                     "%s: invalid CW_min/CW_max: %d/%d\n",
-                     sdata->name, params->cw_min, params->cw_max))
+       if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+               /*
+                * If we can't configure hardware anyway, don't warn. We may
+                * never have initialized the CW parameters.
+                */
+               WARN_ONCE(local->ops->conf_tx,
+                         "%s: invalid CW_min/CW_max: %d/%d\n",
+                         sdata->name, params->cw_min, params->cw_max);
                return -EINVAL;
+       }
 
        trace_drv_conf_tx(local, sdata, ac, params);
        if (local->ops->conf_tx)
index 32a45c0..0d65f4d 100644 (file)
@@ -223,8 +223,6 @@ config NF_CONNTRACK_FTP
          of Network Address Translation on them.
 
          This is FTP support on Layer 3 independent connection tracking.
-         Layer 3 independent connection tracking is experimental scheme
-         which generalize ip_conntrack to support other layer 3 protocols.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
@@ -338,7 +336,7 @@ config NF_CONNTRACK_SIP
        help
          SIP is an application-layer control protocol that can establish,
          modify, and terminate multimedia sessions (conferences) such as
-         Internet telephony calls. With the ip_conntrack_sip and
+         Internet telephony calls. With the nf_conntrack_sip and
          the nf_nat_sip modules you can support the protocol on a connection
          tracking/NATing firewall.
 
@@ -1313,7 +1311,7 @@ config NETFILTER_XT_MATCH_HELPER
        depends on NETFILTER_ADVANCED
        help
          Helper matching allows you to match packets in dynamic connections
-         tracked by a conntrack-helper, ie. ip_conntrack_ftp
+         tracked by a conntrack-helper, ie. nf_conntrack_ftp
 
          To compile it as a module, choose M here.  If unsure, say Y.
 
index 4035419..08adcb2 100644 (file)
@@ -231,7 +231,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
 
        IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n",
                      __func__, ct, ARG_TUPLE(&exp->tuple));
-       nf_ct_expect_related(exp);
+       nf_ct_expect_related(exp, 0);
        nf_ct_expect_put(exp);
 }
 EXPORT_SYMBOL(ip_vs_nfct_expect_related);
index 42ee659..d011d2e 100644 (file)
@@ -159,7 +159,7 @@ static int amanda_help(struct sk_buff *skb,
                if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
                        ret = nf_nat_amanda(skb, ctinfo, protoff,
                                            off - dataoff, len, exp);
-               else if (nf_ct_expect_related(exp) != 0) {
+               else if (nf_ct_expect_related(exp, 0) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
                        ret = NF_DROP;
                }
index 921a7b9..1ba6bec 100644 (file)
@@ -68,7 +68,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
        exp->class                = NF_CT_EXPECT_CLASS_DEFAULT;
        exp->helper               = NULL;
 
-       nf_ct_expect_related(exp);
+       nf_ct_expect_related(exp, 0);
        nf_ct_expect_put(exp);
 
        nf_ct_refresh(ct, skb, timeout * HZ);
index bdfeace..a542761 100644 (file)
@@ -1817,9 +1817,7 @@ EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
 #include <linux/netfilter/nfnetlink_conntrack.h>
 #include <linux/mutex.h>
 
-/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
- * in ip_conntrack_core, since we don't want the protocols to autoload
- * or depend on ctnetlink */
+/* Generic function for tcp/udp/sctp/dccp and alike. */
 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
 {
index ffd1f49..65364de 100644 (file)
@@ -249,13 +249,22 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
 {
-       return a->master == b->master &&
-              nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+       return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
+static bool master_matches(const struct nf_conntrack_expect *a,
+                          const struct nf_conntrack_expect *b,
+                          unsigned int flags)
+{
+       if (flags & NF_CT_EXP_F_SKIP_MASTER)
+               return true;
+
+       return a->master == b->master;
+}
+
 /* Generally a bad idea to call this: could have matched already. */
 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
 {
@@ -399,7 +408,8 @@ static void evict_oldest_expect(struct nf_conn *master,
                nf_ct_remove_expect(last);
 }
 
-static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
+                                      unsigned int flags)
 {
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
@@ -417,8 +427,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
-               if (expect_matches(i, expect)) {
-                       if (i->class != expect->class)
+               if (master_matches(i, expect, flags) &&
+                   expect_matches(i, expect)) {
+                       if (i->class != expect->class ||
+                           i->master != expect->master)
                                return -EALREADY;
 
                        if (nf_ct_remove_expect(i))
@@ -453,12 +465,12 @@ out:
 }
 
 int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
-                               u32 portid, int report)
+                               u32 portid, int report, unsigned int flags)
 {
        int ret;
 
        spin_lock_bh(&nf_conntrack_expect_lock);
-       ret = __nf_ct_expect_check(expect);
+       ret = __nf_ct_expect_check(expect, flags);
        if (ret < 0)
                goto out;
 
index 8c6c11b..0ecb3e2 100644 (file)
@@ -525,7 +525,7 @@ skip_nl_seq:
                                 protoff, matchoff, matchlen, exp);
        else {
                /* Can't expect this?  Best to drop packet now. */
-               if (nf_ct_expect_related(exp) != 0) {
+               if (nf_ct_expect_related(exp, 0) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
                        ret = NF_DROP;
                } else
index 8f6ba81..573cb44 100644 (file)
@@ -1,11 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
- *                                  conntrack/NAT module.
+ * BER and PER decoding library for H.323 conntrack/NAT module.
  *
  * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  *
- * See ip_conntrack_helper_h323_asn1.h for details.
+ * See nf_conntrack_helper_h323_asn1.h for details.
  */
 
 #ifdef __KERNEL__
index 6497e5f..8ba037b 100644 (file)
@@ -305,8 +305,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                   taddr, port, rtp_port, rtp_exp, rtcp_exp);
        } else {                /* Conntrack only */
-               if (nf_ct_expect_related(rtp_exp) == 0) {
-                       if (nf_ct_expect_related(rtcp_exp) == 0) {
+               if (nf_ct_expect_related(rtp_exp, 0) == 0) {
+                       if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
                                pr_debug("nf_ct_h323: expect RTP ");
                                nf_ct_dump_tuple(&rtp_exp->tuple);
                                pr_debug("nf_ct_h323: expect RTCP ");
@@ -364,7 +364,7 @@ static int expect_t120(struct sk_buff *skb,
                ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
-               if (nf_ct_expect_related(exp) == 0) {
+               if (nf_ct_expect_related(exp, 0) == 0) {
                        pr_debug("nf_ct_h323: expect T.120 ");
                        nf_ct_dump_tuple(&exp->tuple);
                } else
@@ -701,7 +701,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
                ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
-               if (nf_ct_expect_related(exp) == 0) {
+               if (nf_ct_expect_related(exp, 0) == 0) {
                        pr_debug("nf_ct_q931: expect H.245 ");
                        nf_ct_dump_tuple(&exp->tuple);
                } else
@@ -825,7 +825,7 @@ static int expect_callforwarding(struct sk_buff *skb,
                                         protoff, data, dataoff,
                                         taddr, port, exp);
        } else {                /* Conntrack only */
-               if (nf_ct_expect_related(exp) == 0) {
+               if (nf_ct_expect_related(exp, 0) == 0) {
                        pr_debug("nf_ct_q931: expect Call Forwarding ");
                        nf_ct_dump_tuple(&exp->tuple);
                } else
@@ -1284,7 +1284,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
                ret = nat_q931(skb, ct, ctinfo, protoff, data,
                               taddr, i, port, exp);
        } else {                /* Conntrack only */
-               if (nf_ct_expect_related(exp) == 0) {
+               if (nf_ct_expect_related(exp, 0) == 0) {
                        pr_debug("nf_ct_ras: expect Q.931 ");
                        nf_ct_dump_tuple(&exp->tuple);
 
@@ -1349,7 +1349,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
                          IPPROTO_UDP, NULL, &port);
        exp->helper = nf_conntrack_helper_ras;
 
-       if (nf_ct_expect_related(exp) == 0) {
+       if (nf_ct_expect_related(exp, 0) == 0) {
                pr_debug("nf_ct_ras: expect RAS ");
                nf_ct_dump_tuple(&exp->tuple);
        } else
@@ -1561,7 +1561,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
        exp->flags = NF_CT_EXPECT_PERMANENT;
        exp->helper = nf_conntrack_helper_q931;
 
-       if (nf_ct_expect_related(exp) == 0) {
+       if (nf_ct_expect_related(exp, 0) == 0) {
                pr_debug("nf_ct_ras: expect Q.931 ");
                nf_ct_dump_tuple(&exp->tuple);
        } else
@@ -1615,7 +1615,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
        exp->flags = NF_CT_EXPECT_PERMANENT;
        exp->helper = nf_conntrack_helper_q931;
 
-       if (nf_ct_expect_related(exp) == 0) {
+       if (nf_ct_expect_related(exp, 0) == 0) {
                pr_debug("nf_ct_ras: expect Q.931 ");
                nf_ct_dump_tuple(&exp->tuple);
        } else
index 7ac156f..e40988a 100644 (file)
@@ -213,7 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
                                                 addr_beg_p - ib_ptr,
                                                 addr_end_p - addr_beg_p,
                                                 exp);
-                       else if (nf_ct_expect_related(exp) != 0) {
+                       else if (nf_ct_expect_related(exp, 0) != 0) {
                                nf_ct_helper_log(skb, ct,
                                                 "cannot add expectation");
                                ret = NF_DROP;
index 1b77444..6aa01eb 100644 (file)
@@ -2616,7 +2616,7 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
        if (IS_ERR(exp))
                return PTR_ERR(exp);
 
-       err = nf_ct_expect_related_report(exp, portid, report);
+       err = nf_ct_expect_related_report(exp, portid, report, 0);
        nf_ct_expect_put(exp);
        return err;
 }
@@ -3367,7 +3367,7 @@ ctnetlink_create_expect(struct net *net,
                goto err_rcu;
        }
 
-       err = nf_ct_expect_related_report(exp, portid, report);
+       err = nf_ct_expect_related_report(exp, portid, report, 0);
        nf_ct_expect_put(exp);
 err_rcu:
        rcu_read_unlock();
index b22042a..a971183 100644 (file)
@@ -234,9 +234,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
        nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
        if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
                nf_nat_pptp_exp_gre(exp_orig, exp_reply);
-       if (nf_ct_expect_related(exp_orig) != 0)
+       if (nf_ct_expect_related(exp_orig, 0) != 0)
                goto out_put_both;
-       if (nf_ct_expect_related(exp_reply) != 0)
+       if (nf_ct_expect_related(exp_reply, 0) != 0)
                goto out_unexpect_orig;
 
        /* Add GRE keymap entries */
index c2eb365..5b05487 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * ip_conntrack_proto_gre.c - Version 3.0
- *
  * Connection tracking protocol helper module for GRE.
  *
  * GRE is a generic encapsulation protocol, which is generally not very
index dd53e2b..097deba 100644 (file)
@@ -215,7 +215,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
                return -NF_ACCEPT;
        }
 
-       /* See ip_conntrack_proto_tcp.c */
+       /* See nf_conntrack_proto_tcp.c */
        if (state->net->ct.sysctl_checksum &&
            state->hook == NF_INET_PRE_ROUTING &&
            nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
index d5fdfa0..85c1f8c 100644 (file)
@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
        struct ip_ct_tcp_state *receiver = &state->seen[!dir];
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        __u32 seq, ack, sack, end, win, swin;
+       u16 win_raw;
        s32 receiver_offset;
        bool res, in_recv_win;
 
@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
         */
        seq = ntohl(tcph->seq);
        ack = sack = ntohl(tcph->ack_seq);
-       win = ntohs(tcph->window);
+       win_raw = ntohs(tcph->window);
+       win = win_raw;
        end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
 
        if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
                            && state->last_seq == seq
                            && state->last_ack == ack
                            && state->last_end == end
-                           && state->last_win == win)
+                           && state->last_win == win_raw)
                                state->retrans++;
                        else {
                                state->last_dir = dir;
                                state->last_seq = seq;
                                state->last_ack = ack;
                                state->last_end = end;
-                               state->last_win = win;
+                               state->last_win = win_raw;
                                state->retrans = 0;
                        }
                }
index 81448c3..1aebd65 100644 (file)
@@ -153,7 +153,7 @@ static int help(struct sk_buff *skb,
        nf_ct_dump_tuple(&exp->tuple);
 
        /* Can't expect this?  Best to drop packet now. */
-       if (nf_ct_expect_related(exp) != 0) {
+       if (nf_ct_expect_related(exp, 0) != 0) {
                nf_ct_helper_log(skb, ct, "cannot add expectation");
                ret = NF_DROP;
        }
index 1072517..b83dc9b 100644 (file)
@@ -977,11 +977,15 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
                /* -EALREADY handling works around end-points that send
                 * SDP messages with identical port but different media type,
                 * we pretend expectation was set up.
+                * It also works in the case that SDP messages are sent with
+                * identical expect tuples but for different master conntracks.
                 */
-               int errp = nf_ct_expect_related(rtp_exp);
+               int errp = nf_ct_expect_related(rtp_exp,
+                                               NF_CT_EXP_F_SKIP_MASTER);
 
                if (errp == 0 || errp == -EALREADY) {
-                       int errcp = nf_ct_expect_related(rtcp_exp);
+                       int errcp = nf_ct_expect_related(rtcp_exp,
+                                               NF_CT_EXP_F_SKIP_MASTER);
 
                        if (errcp == 0 || errcp == -EALREADY)
                                ret = NF_ACCEPT;
@@ -1296,7 +1300,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
                ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
                                    exp, matchoff, matchlen);
        else {
-               if (nf_ct_expect_related(exp) != 0) {
+               if (nf_ct_expect_related(exp, 0) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
                        ret = NF_DROP;
                } else
index df6d6d6..80ee53f 100644 (file)
@@ -78,7 +78,7 @@ static int tftp_help(struct sk_buff *skb,
                nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
                if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
                        ret = nf_nat_tftp(skb, ctinfo, exp);
-               else if (nf_ct_expect_related(exp) != 0) {
+               else if (nf_ct_expect_related(exp, 0) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
                        ret = NF_DROP;
                }
index a352604..3bc7e08 100644 (file)
@@ -48,7 +48,7 @@ static unsigned int help(struct sk_buff *skb,
                int res;
 
                exp->tuple.dst.u.tcp.port = htons(port);
-               res = nf_ct_expect_related(exp);
+               res = nf_ct_expect_related(exp, 0);
                if (res == 0)
                        break;
                else if (res != -EBUSY) {
index 9ab4104..3f6023e 100644 (file)
@@ -519,7 +519,7 @@ another_round:
  * and NF_INET_LOCAL_OUT, we change the destination to map into the
  * range. It might not be possible to get a unique tuple, but we try.
  * At worst (or if we race), we will end up with a final duplicate in
- * __ip_conntrack_confirm and drop the packet. */
+ * __nf_conntrack_confirm and drop the packet. */
 static void
 get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
index d48484a..aace676 100644 (file)
@@ -91,7 +91,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
index dfb7ef8..c691ab8 100644 (file)
@@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff *skb,
                int ret;
 
                exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, 0);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
index e338d91..f0a735e 100644 (file)
@@ -414,7 +414,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
                int ret;
 
                exp->tuple.dst.u.udp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
+               ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
                if (ret == 0)
                        break;
                else if (ret != -EBUSY) {
@@ -607,7 +607,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
                int ret;
 
                rtp_exp->tuple.dst.u.udp.port = htons(port);
-               ret = nf_ct_expect_related(rtp_exp);
+               ret = nf_ct_expect_related(rtp_exp,
+                                          NF_CT_EXP_F_SKIP_MASTER);
                if (ret == -EBUSY)
                        continue;
                else if (ret < 0) {
@@ -615,7 +616,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
                        break;
                }
                rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
-               ret = nf_ct_expect_related(rtcp_exp);
+               ret = nf_ct_expect_related(rtcp_exp,
+                                          NF_CT_EXP_F_SKIP_MASTER);
                if (ret == 0)
                        break;
                else if (ret == -EBUSY) {
index 833a11f..1a59113 100644 (file)
@@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff *skb,
                = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
        exp->dir = IP_CT_DIR_REPLY;
        exp->expectfn = nf_nat_follow_master;
-       if (nf_ct_expect_related(exp) != 0) {
+       if (nf_ct_expect_related(exp, 0) != 0) {
                nf_ct_helper_log(skb, exp->master, "cannot add expectation");
                return NF_DROP;
        }
index b101f18..c769462 100644 (file)
@@ -470,7 +470,7 @@ synproxy_send_client_synack(struct net *net,
        struct iphdr *iph, *niph;
        struct tcphdr *nth;
        unsigned int tcp_hdr_size;
-       u16 mss = opts->mss;
+       u16 mss = opts->mss_encode;
 
        iph = ip_hdr(skb);
 
@@ -687,7 +687,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
        state = &ct->proto.tcp;
        switch (state->state) {
        case TCP_CONNTRACK_CLOSE:
-               if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+               if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
                        nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
                                                      ntohl(th->seq) + 1);
                        break;
@@ -884,7 +884,7 @@ synproxy_send_client_synack_ipv6(struct net *net,
        struct ipv6hdr *iph, *niph;
        struct tcphdr *nth;
        unsigned int tcp_hdr_size;
-       u16 mss = opts->mss;
+       u16 mss = opts->mss_encode;
 
        iph = ipv6_hdr(skb);
 
@@ -1111,7 +1111,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
        state = &ct->proto.tcp;
        switch (state->state) {
        case TCP_CONNTRACK_CLOSE:
-               if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+               if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
                        nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
                                                      ntohl(th->seq) + 1);
                        break;
index ed17a7c..605a7cf 100644 (file)
@@ -1662,7 +1662,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
                chain->flags |= NFT_BASE_CHAIN | flags;
                basechain->policy = NF_ACCEPT;
-               INIT_LIST_HEAD(&basechain->cb_list);
+               flow_block_init(&basechain->flow_block);
        } else {
                chain = kzalloc(sizeof(*chain), GFP_KERNEL);
                if (chain == NULL)
@@ -1900,6 +1900,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
 
        if (nla[NFTA_CHAIN_FLAGS])
                flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
+       else if (chain)
+               flags = chain->flags;
 
        nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
index 2c33028..64f5fd5 100644 (file)
@@ -116,7 +116,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
        struct flow_block_cb *block_cb;
        int err;
 
-       list_for_each_entry(block_cb, &basechain->cb_list, list) {
+       list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
                err = block_cb->cb(type, type_data, block_cb->cb_priv);
                if (err < 0)
                        return err;
@@ -154,7 +154,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
 static int nft_flow_offload_bind(struct flow_block_offload *bo,
                                 struct nft_base_chain *basechain)
 {
-       list_splice(&bo->cb_list, &basechain->cb_list);
+       list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
        return 0;
 }
 
@@ -198,6 +198,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
                return -EOPNOTSUPP;
 
        bo.command = cmd;
+       bo.block = &basechain->flow_block;
        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        bo.extack = &extack;
        INIT_LIST_HEAD(&bo.cb_list);
index 92077d4..4abbb45 100644 (file)
@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
        ss = nfnetlink_get_subsys(type << 8);
        rcu_read_unlock();
        if (!ss)
-               request_module("nfnetlink-subsys-%d", type);
+               request_module_nowait("nfnetlink-subsys-%d", type);
        return 0;
 }
 #endif
index 3fd540b..b5d5d07 100644 (file)
@@ -193,7 +193,7 @@ static inline void nft_chain_filter_inet_init(void) {}
 static inline void nft_chain_filter_inet_fini(void) {}
 #endif /* CONFIG_NF_TABLES_IPV6 */
 
-#ifdef CONFIG_NF_TABLES_BRIDGE
+#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
 static unsigned int
 nft_do_chain_bridge(void *priv,
                    struct sk_buff *skb,
index 2f89bde..ff9ac8a 100644 (file)
@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
 #ifdef CONFIG_NF_TABLES_IPV6
 MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
 #endif
+#ifdef CONFIG_NF_TABLES_INET
+MODULE_ALIAS_NFT_CHAIN(1, "nat");      /* NFPROTO_INET */
+#endif
index 827ab61..46ca8bc 100644 (file)
@@ -1252,7 +1252,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
                          priv->l4proto, NULL, &priv->dport);
        exp->timeout.expires = jiffies + priv->timeout * HZ;
 
-       if (nf_ct_expect_related(exp) != 0)
+       if (nf_ct_expect_related(exp, 0) != 0)
                regs->verdict.code = NF_DROP;
 }
 
index fe93e73..b836d55 100644 (file)
@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
        priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
        priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
-       if (priv->modulus <= 1)
+       if (priv->modulus < 1)
                return -ERANGE;
 
        if (priv->offset + priv->modulus - 1 < priv->offset)
index 76866f7..f1b1d94 100644 (file)
@@ -546,7 +546,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
        if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
                return ERR_PTR(-EINVAL);
 
-#ifdef CONFIG_NF_TABLES_BRIDGE
+#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META)
        if (ctx->family == NFPROTO_BRIDGE)
                return ERR_PTR(-EAGAIN);
 #endif
index 8487eef..43eeb1f 100644 (file)
@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_ALIAS_NFT_EXPR("redir");
index 80060ad..928e661 100644 (file)
@@ -31,6 +31,8 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
                opts->options |= NF_SYNPROXY_OPT_ECN;
 
        opts->options &= priv->info.options;
+       opts->mss_encode = opts->mss;
+       opts->mss = info->mss;
        if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
                synproxy_init_timestamp_cookie(info, opts);
        else
index dca3b1e..bc89e16 100644 (file)
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                           const struct sk_buff *skb)
 {
-       struct flow_stats *stats;
+       struct sw_flow_stats *stats;
        unsigned int cpu = smp_processor_id();
        int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                        if (likely(flow->stats_last_writer != -1) &&
                            likely(!rcu_access_pointer(flow->stats[cpu]))) {
                                /* Try to allocate CPU-specific stats. */
-                               struct flow_stats *new_stats;
+                               struct sw_flow_stats *new_stats;
 
                                new_stats =
                                        kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-               struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+               struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
                if (stats) {
                        /* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-               struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+               struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
                if (stats) {
                        spin_lock_bh(&stats->lock);
index 3e2cc22..a5506e2 100644 (file)
@@ -194,7 +194,7 @@ struct sw_flow_actions {
        struct nlattr actions[];
 };
 
-struct flow_stats {
+struct sw_flow_stats {
        u64 packet_count;               /* Number of packets matched. */
        u64 byte_count;                 /* Number of bytes matched. */
        unsigned long used;             /* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
        struct cpumask cpu_used_mask;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
-       struct flow_stats __rcu *stats[]; /* One for each CPU.  First one
+       struct sw_flow_stats __rcu *stats[]; /* One for each CPU.  First one
                                           * is allocated at flow creation time,
                                           * the rest are allocated on demand
                                           * while holding the 'stats[0].lock'.
index 988fd8a..cf3582c 100644 (file)
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 struct sw_flow *ovs_flow_alloc(void)
 {
        struct sw_flow *flow;
-       struct flow_stats *stats;
+       struct sw_flow_stats *stats;
 
        flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
        if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
-                                       (struct flow_stats __force *)flow->stats[cpu]);
+                                       (struct sw_flow_stats __force *)flow->stats[cpu]);
        kmem_cache_free(flow_cache, flow);
 }
 
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
 
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_cpu_ids
-                                         * sizeof(struct flow_stats *)),
+                                         * sizeof(struct sw_flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;
 
        flow_stats_cache
-               = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+               = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
index d144233..efd3cfb 100644 (file)
@@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
        if (!indr_dev->block)
                return;
 
+       bo.block = &indr_dev->block->flow_block;
+
        indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                          &bo);
        tcf_block_setup(indr_dev->block, &bo);
@@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
                .command        = command,
                .binder_type    = ei->binder_type,
                .net            = dev_net(dev),
+               .block          = &block->flow_block,
                .block_shared   = tcf_block_shared(block),
                .extack         = extack,
        };
@@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
+       bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);
@@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
+       flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
-       INIT_LIST_HEAD(&block->cb_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);
 
@@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block)
 EXPORT_SYMBOL(tcf_block_put);
 
 static int
-tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
 {
@@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block,
 
                i++;
        }
-       list_splice(&bo->cb_list, &block->cb_list);
+       list_splice(&bo->cb_list, &block->flow_block.cb_list);
 
        return 0;
 
@@ -2152,7 +2156,9 @@ replay:
                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
                               RTM_NEWTFILTER, false, rtnl_held);
                tfilter_put(tp, fh);
-               q->flags &= ~TCQ_F_CAN_BYPASS;
+               /* q pointer is NULL for shared blocks */
+               if (q)
+                       q->flags &= ~TCQ_F_CAN_BYPASS;
        }
 
 errout:
@@ -3156,7 +3162,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
        if (block->nooffloaddevcnt && err_stop)
                return -EOPNOTSUPP;
 
-       list_for_each_entry(block_cb, &block->cb_list, list) {
+       list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
                err = block_cb->cb(type, type_data, block_cb->cb_priv);
                if (err) {
                        if (err_stop)
index 691f718..3f7a9c0 100644 (file)
@@ -651,7 +651,7 @@ skip:
        }
 }
 
-static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                             void *cb_priv, struct netlink_ext_ack *extack)
 {
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
index 38d6e85..0541237 100644 (file)
@@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
        return NULL;
 }
 
-static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                        void *cb_priv, struct netlink_ext_ack *extack)
 {
        struct tcf_block *block = tp->chain->block;
index a30d2f8..455ea27 100644 (file)
@@ -282,7 +282,7 @@ skip:
        arg->count++;
 }
 
-static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                          void *cb_priv, struct netlink_ext_ack *extack)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
index be9e46c..8614088 100644 (file)
@@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 }
 
 static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
-                              bool add, tc_setup_cb_t *cb, void *cb_priv,
+                              bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
 {
        struct tc_cls_u32_offload cls_u32 = {};
@@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 }
 
 static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
-                              bool add, tc_setup_cb_t *cb, void *cb_priv,
+                              bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
 {
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
        return 0;
 }
 
-static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                         void *cb_priv, struct netlink_ext_ack *extack)
 {
        struct tc_u_common *tp_c = tp->data;
index f345662..ca8ac96 100644 (file)
@@ -476,7 +476,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
        }
 }
 
-/* tipc_toprsv_listener_data_ready - interrupt callback with connection request
+/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
  * The queued job is launched into tipc_topsrv_accept()
  */
 static void tipc_topsrv_listener_data_ready(struct sock *sk)