Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Thu, 2 Nov 2017 05:59:52 +0000 (14:59 +0900)
committer David S. Miller <davem@davemloft.net>
          Thu, 2 Nov 2017 06:23:39 +0000 (15:23 +0900)
Smooth Cong Wang's bug fix into 'net-next'.  Basically put
the bulk of the tcf_block_put() logic from 'net' into
tcf_block_put_ext(), but after the offload unbind.

Signed-off-by: David S. Miller <davem@davemloft.net>
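
For orientation, the 'net-next' side keeps tcf_block_put() as a thin wrapper
around the extended variant, so the teardown logic lifted from 'net' runs
once, after the unbind.  A minimal sketch of that relationship, assuming the
4.14-era layout in which struct tcf_block carries its Qdisc in block->q:

/* Sketch: tcf_block_put() delegates to tcf_block_put_ext(), which does
 * the offload unbind first and only then the chain flush taken from
 * Cong Wang's fix in 'net'.  The !block guard protects the block->q
 * dereference in this wrapper.
 */
void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, NULL, block->q, &ei);
}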
14 files changed:
MAINTAINERS
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/tun.c
include/net/tcp.h
include/uapi/linux/bpf.h
kernel/bpf/sockmap.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/l2tp/l2tp_ppp.c
net/sched/cls_api.c
net/xfrm/xfrm_policy.c
tools/include/uapi/linux/bpf.h
tools/testing/selftests/tc-testing/tdc.py

diff --cc MAINTAINERS
Simple merge
diff --cc drivers/net/ethernet/hisilicon/hns/hns_enet.c
Simple merge
diff --cc drivers/net/ethernet/mellanox/mlxsw/reg.h
Simple merge
diff --cc drivers/net/tun.c
Simple merge
diff --cc include/net/tcp.h
index a2510cdef4b56b157934173088b922f0b1283c4a,e6d0002a1b0bc5f28c331a760823c8dc92f8fe24..c2bf2a822b109eb63d8acb754a696365d918fb46
@@@ -1733,13 -1768,11 +1733,13 @@@ static inline struct sk_buff *tcp_highe
  
  static inline void tcp_highest_sack_reset(struct sock *sk)
  {
 -      tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
 +      struct sk_buff *skb = tcp_rtx_queue_head(sk);
 +
 +      tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
  }
  
- /* Called when old skb is about to be deleted (to be combined with new skb) */
- static inline void tcp_highest_sack_combine(struct sock *sk,
+ /* Called when old skb is about to be deleted and replaced by new skb */
+ static inline void tcp_highest_sack_replace(struct sock *sk,
                                            struct sk_buff *old,
                                            struct sk_buff *new)
  {
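
The hunk above covers only the comment and the signature; the body of the
renamed helper came cleanly from 'net' and therefore does not show up in the
combined diff.  For reference, it is expected to match the fix that arrived
via 'net', roughly:

static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	/* If the skb about to be freed carried the highest-SACK marker,
	 * hand the marker to its replacement so that tp->highest_sack
	 * never points at freed memory.
	 */
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}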
diff --cc include/uapi/linux/bpf.h
Simple merge
diff --cc kernel/bpf/sockmap.c
Simple merge
diff --cc net/ipv4/tcp_output.c
index a69a34f57330f46a6943191468fe4a7d41475a88,823003eef3a21a5cc5c27e0be9f46159afa060df..a85e8a282d173983e35a2a1e3135ca2a63f1699e
@@@ -2735,8 -2666,10 +2736,8 @@@ static bool tcp_collapse_retrans(struc
                else if (!skb_shift(skb, next_skb, next_skb_size))
                        return false;
        }
-       tcp_highest_sack_combine(sk, next_skb, skb);
+       tcp_highest_sack_replace(sk, next_skb, skb);
  
 -      tcp_unlink_write_queue(next_skb, sk);
 -
        if (next_skb->ip_summed == CHECKSUM_PARTIAL)
                skb->ip_summed = CHECKSUM_PARTIAL;
  
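On the call-site side the conflict is just the rename; the invariant being
preserved is that the highest_sack marker moves from next_skb to skb before
next_skb is unlinked and freed later in tcp_collapse_retrans().  A sketch of
the merged tail, assuming net-next's rtx-queue rework frees the skb via
tcp_rtx_queue_unlink_and_free():

	tcp_highest_sack_replace(sk, next_skb, skb);	/* hand the marker over first */

	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* ... checksum and flag bookkeeping elided ... */

	tcp_rtx_queue_unlink_and_free(next_skb, sk);	/* assumed free path */
	return true;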
diff --cc net/ipv6/addrconf.c
Simple merge
diff --cc net/l2tp/l2tp_ppp.c
Simple merge
diff --cc net/sched/cls_api.c
index d9d54b367d232a41bb413c389051986ddd15e59d,b2d31074548724a3b59defbcd827d6538ac9bea6..2c03fcbc7188c6b42cb770764cef94d12a0b519f
@@@ -331,47 -289,22 +331,27 @@@ static void tcf_block_put_final(struct 
  }
  
  /* XXX: Standalone actions are not allowed to jump to any chain, and bound
-  * actions should be all removed after flushing. However, filters are destroyed
-  * in RCU callbacks, we have to hold the chains first, otherwise we would
-  * always race with RCU callbacks on this list without proper locking.
+  * actions should be all removed after flushing. However, filters are now
+  * destroyed in tc filter workqueue with RTNL lock, they can not race here.
   */
- static void tcf_block_put_deferred(struct work_struct *work)
- {
-       struct tcf_block *block = container_of(work, struct tcf_block, work);
-       struct tcf_chain *chain;
-       rtnl_lock();
-       /* Hold a refcnt for all chains, except 0, in case they are gone. */
-       list_for_each_entry(chain, &block->chain_list, list)
-               if (chain->index)
-                       tcf_chain_hold(chain);
-       /* No race on the list, because no chain could be destroyed. */
-       list_for_each_entry(chain, &block->chain_list, list)
-               tcf_chain_flush(chain);
-       INIT_WORK(&block->work, tcf_block_put_final);
-       /* Wait for RCU callbacks to release the reference count and make
-        * sure their works have been queued before this.
-        */
-       rcu_barrier();
-       tcf_queue_work(&block->work);
-       rtnl_unlock();
- }
 -void tcf_block_put(struct tcf_block *block)
 +void tcf_block_put_ext(struct tcf_block *block,
 +                     struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 +                     struct tcf_block_ext_info *ei)
  {
+       struct tcf_chain *chain, *tmp;
        if (!block)
                return;
  
-       INIT_WORK(&block->work, tcf_block_put_deferred);
 +      tcf_block_offload_unbind(block, q, ei);
 +
 -      /* Wait for RCU callbacks to release the reference count and make
 -       * sure their works have been queued before this.
+       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+               tcf_chain_flush(chain);
+       INIT_WORK(&block->work, tcf_block_put_final);
 +      /* Wait for existing RCU callbacks to cool down, make sure their works
 +       * have been queued before this. We can not flush pending works here
 +       * because we are holding the RTNL lock.
         */
        rcu_barrier();
        tcf_queue_work(&block->work);
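
Pieced together from the result column of the hunk above, the merged
tcf_block_put_ext() should come out roughly as follows; a readability
reassembly of the diff, not an authoritative copy of the tree:

void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	/* Unbind offloads first, then run the flush logic from 'net'. */
	tcf_block_offload_unbind(block, q, ei);

	/* Filters are now destroyed on the tc filter workqueue under RTNL,
	 * so flushing the chains here cannot race their RCU callbacks.
	 */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_flush(chain);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* Wait for existing RCU callbacks to cool down and make sure their
	 * works have been queued before ours; pending works cannot be
	 * flushed here because RTNL is held.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}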
diff --cc net/xfrm/xfrm_policy.c
Simple merge
diff --cc tools/include/uapi/linux/bpf.h
Simple merge
diff --cc tools/testing/selftests/tc-testing/tdc.py
Simple merge