mptcp: add mptcp_for_each_subflow_safe helper
author Matthieu Baerts <matthieu.baerts@tessares.net>
Tue, 6 Sep 2022 20:55:39 +0000 (22:55 +0200)
committer Paolo Abeni <pabeni@redhat.com>
Thu, 15 Sep 2022 10:01:02 +0000 (12:01 +0200)
Similar to mptcp_for_each_subflow(): a dedicated helper is clearer than
open-coding list_for_each_entry_safe() now that the _safe version of the
iteration is used in multiple places.
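
As a rough sketch (not part of the patch), the difference between the two
iterators is whether the loop body may unlink the current entry from
msk->conn_list: the _safe variant caches the next entry in a temporary, so
the current subflow can be closed while walking. should_close() below is a
hypothetical predicate, and msk/sk are assumed to be the MPTCP socket and
its struct sock, as in the hunks that follow.

	struct mptcp_subflow_context *subflow, *tmp;

	/* read-only walk: the plain iterator is enough */
	mptcp_for_each_subflow(msk, subflow)
		pr_debug("subflow local_id=%u\n", subflow->local_id);

	/* the body may remove entries from msk->conn_list: use the _safe variant */
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		if (should_close(subflow))	/* hypothetical predicate */
			__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
					  subflow, 0);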

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index a3e4ee7..5e142c0 100644
@@ -796,7 +796,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
                u8 rm_id = rm_list->ids[i];
                bool removed = false;
 
-               list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+               mptcp_for_each_subflow_safe(msk, subflow, tmp) {
                        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                        int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
                        u8 id = subflow->local_id;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d398f38..fc782d6 100644
@@ -2357,7 +2357,7 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
 
        might_sleep();
 
-       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+       mptcp_for_each_subflow_safe(msk, subflow, tmp) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
                if (inet_sk_state_load(ssk) != TCP_CLOSE)
@@ -2400,7 +2400,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
        mptcp_token_destroy(msk);
 
-       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+       mptcp_for_each_subflow_safe(msk, subflow, tmp) {
                struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
                bool slow;
 
@@ -3047,7 +3047,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
        __mptcp_clear_xmit(sk);
 
        /* join list will be eventually flushed (with rst) at sock lock release time */
-       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node)
+       mptcp_for_each_subflow_safe(msk, subflow, tmp)
                __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
 
        /* move to sk_receive_queue, sk_stream_kill_queues will purge it */
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 132d508..c1b1231 100644
@@ -314,6 +314,8 @@ struct mptcp_sock {
 
 #define mptcp_for_each_subflow(__msk, __subflow)                       \
        list_for_each_entry(__subflow, &((__msk)->conn_list), node)
+#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp)                   \
+       list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
 
 static inline void msk_owned_by_me(const struct mptcp_sock *msk)
 {
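
For reference, the new helper boils down to the standard
list_for_each_entry_safe() walk from include/linux/list.h; simplified, and
with the macro arguments substituted (__msk written as msk), it is roughly
the loop below. The next node is cached in __tmp before the body runs,
which is what makes unlinking __subflow from the conn_list safe.

	for (__subflow = list_first_entry(&msk->conn_list, typeof(*__subflow), node),
	     __tmp = list_next_entry(__subflow, node);
	     &__subflow->node != &msk->conn_list;
	     __subflow = __tmp, __tmp = list_next_entry(__tmp, node))
		; /* body may close/unlink __subflow, e.g. via __mptcp_close_ssk() */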