static inline void tcp_highest_sack_reset(struct sock *sk)
{
- tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
+ struct sk_buff *skb = tcp_rtx_queue_head(sk);
+
+ tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
}
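
The `skb ?: tcp_send_head(sk)` expression uses GCC's conditional operator with the middle operand omitted: it evaluates to `skb` when `skb` is non-NULL, otherwise to `tcp_send_head(sk)`, and evaluates `skb` only once. With the retransmit queue now kept apart from the write queue, the reset point is the head of the rtx queue when one exists, else the next skb to be sent. A minimal standalone sketch of the operator (the names below are illustrative, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* "primary ?: fallback" is GNU C shorthand for
 * "primary ? primary : fallback", with primary evaluated only once.
 */
static const char *pick(const char *primary, const char *fallback)
{
	return primary ?: fallback;
}

int main(void)
{
	printf("%s\n", pick("rtx head", "send head"));	/* prints "rtx head" */
	printf("%s\n", pick(NULL, "send head"));	/* prints "send head" */
	return 0;
}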
- /* Called when old skb is about to be deleted (to be combined with new skb) */
- static inline void tcp_highest_sack_combine(struct sock *sk,
+ /* Called when old skb is about to be deleted and replaced by new skb */
+ static inline void tcp_highest_sack_replace(struct sock *sk,
struct sk_buff *old,
struct sk_buff *new)
{
- if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+ if (old == tcp_highest_sack(sk))
tcp_sk(sk)->highest_sack = new;
}
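
The rename is not just cosmetic. The old `_combine` helper only repointed `highest_sack` when `sacked_out` was non-zero, so a caller that frees `old` unconditionally (tcp_mtu_probe()-style coalescing) could leave the cached pointer dangling. A small userspace analog of the hazard, with invented types and names for illustration:

#include <stdlib.h>
#include <assert.h>

struct node { int seq; };

/* Cached pointer into a queue, playing the role of tp->highest_sack. */
struct queue { struct node *highest; };

/* Repoint the cache unconditionally before the old node is freed;
 * gating this on unrelated state is exactly the bug the rename fixes.
 */
static void node_replace(struct queue *q, struct node *old, struct node *new)
{
	if (q->highest == old)
		q->highest = new;
	free(old);
}

int main(void)
{
	struct node *a = malloc(sizeof(*a));
	struct node *b = malloc(sizeof(*b));
	struct queue q = { .highest = a };

	node_replace(&q, a, b);		/* a is gone; the cache must follow */
	assert(q.highest == b);
	free(b);
	return 0;
}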
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are destroyed
- * in RCU callbacks, we have to hold the chains first, otherwise we would
- * always race with RCU callbacks on this list without proper locking.
+ * actions should all be removed after flushing. However, filters are now
+ * destroyed in the tc filter workqueue with the RTNL lock held, so they
+ * cannot race here.
*/
- static void tcf_block_put_deferred(struct work_struct *work)
- {
- struct tcf_block *block = container_of(work, struct tcf_block, work);
- struct tcf_chain *chain;
-
- rtnl_lock();
- /* Hold a refcnt for all chains, except 0, in case they are gone. */
- list_for_each_entry(chain, &block->chain_list, list)
- if (chain->index)
- tcf_chain_hold(chain);
-
- /* No race on the list, because no chain could be destroyed. */
- list_for_each_entry(chain, &block->chain_list, list)
- tcf_chain_flush(chain);
-
- INIT_WORK(&block->work, tcf_block_put_final);
- /* Wait for RCU callbacks to release the reference count and make
- * sure their works have been queued before this.
- */
- rcu_barrier();
- tcf_queue_work(&block->work);
- rtnl_unlock();
- }
-
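
For context, the idiom the new code relies on: tc filter works are queued on an ordered workqueue, and rcu_barrier() waits for every in-flight call_rcu() callback to finish, so any work those callbacks queue is already on the queue before the block's final work is added behind it. A kernel-style sketch of that ordering pattern (simplified, not a drop-in; my_wq and the item type are stand-ins, not the cls_api.c names):

#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct workqueue_struct *my_wq;	/* alloc_ordered_workqueue() at init */

struct item {
	struct rcu_head rcu;
	struct work_struct work;
};

static void item_free_work(struct work_struct *work)
{
	kfree(container_of(work, struct item, work));
}

/* RCU callback: defer the real teardown to the ordered workqueue. */
static void item_rcu_cb(struct rcu_head *head)
{
	struct item *it = container_of(head, struct item, rcu);

	INIT_WORK(&it->work, item_free_work);
	queue_work(my_wq, &it->work);
}

static void final_teardown(struct work_struct *final_work)
{
	/* After rcu_barrier(), every item_rcu_cb() has run, so every item
	 * work is already queued; the ordered queue then runs final_work
	 * strictly after them.
	 */
	rcu_barrier();
	queue_work(my_wq, final_work);
}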
- void tcf_block_put(struct tcf_block *block)
+ void tcf_block_put_ext(struct tcf_block *block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
{
+ struct tcf_chain *chain, *tmp;
+
if (!block)
return;
- INIT_WORK(&block->work, tcf_block_put_deferred);
+ tcf_block_offload_unbind(block, q, ei);
+
- /* Wait for RCU callbacks to release the reference count and make
- * sure their works have been queued before this.
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_flush(chain);
+
+ INIT_WORK(&block->work, tcf_block_put_final);
+ /* Wait for existing RCU callbacks to finish, and make sure any work
+ * items they queue land on the queue before this one. We cannot flush
+ * pending work items here because we are holding the RTNL lock.
*/
rcu_barrier();
tcf_queue_work(&block->work);
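
The comment's "cannot flush" caveat deserves a note: the filter work handlers themselves take the RTNL lock, so flushing the workqueue while holding RTNL would wait on work that is itself blocked on rtnl_lock(). A sketch of the deadlock being avoided (filter_wq and both function names are made up for illustration):

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

static struct workqueue_struct *filter_wq;

/* Work handler, mirroring tc filter destruction: needs RTNL itself. */
static void filter_destroy_work(struct work_struct *work)
{
	rtnl_lock();
	/* ... tear down the filter ... */
	rtnl_unlock();
}

static void bad_teardown(void)
{
	rtnl_lock();
	/* Deadlock: waits for filter_destroy_work(), which is itself
	 * blocked waiting for the RTNL lock we hold right here.
	 */
	flush_workqueue(filter_wq);
	rtnl_unlock();
}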