1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2022, SUSE.
7 #define pr_fmt(fmt) "MPTCP: " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/list.h>
12 #include <linux/rculist.h>
13 #include <linux/spinlock.h>
/* Serializes writers (register/unregister) of the scheduler registry below. */
16 static DEFINE_SPINLOCK(mptcp_sched_list_lock);
/* Global registry of available MPTCP packet schedulers; readers walk it
 * under RCU (see mptcp_sched_find()), writers hold mptcp_sched_list_lock. */
17 static LIST_HEAD(mptcp_sched_list);
/*
 * Built-in default scheduler hook: select one subflow for transmission.
 * data->reinject distinguishes the retransmission path (pick via
 * mptcp_subflow_get_retrans()) from the normal send path (pick via
 * mptcp_subflow_get_send()).
 * NOTE(review): error handling for a NULL ssk is not visible in this
 * excerpt — presumably an elided check returns an error before the
 * set_scheduled call; confirm against the full file.
 */
19 static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
20 struct mptcp_sched_data *data)
24 ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
25 mptcp_subflow_get_send(msk);
/* Mark the chosen subflow so the caller knows where to transmit. */
29 mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
/* The always-available fallback scheduler; registered at init time and
 * never unregistered (mptcp_unregister_scheduler() refuses it). */
33 static struct mptcp_sched_ops mptcp_sched_default = {
34 .get_subflow = mptcp_sched_default_get_subflow,
39 /* Must be called with rcu read lock held */
/*
 * mptcp_sched_find - look up a registered scheduler by exact name match.
 * @name: scheduler name to search for.
 *
 * Returns the matching mptcp_sched_ops, or NULL when none is registered
 * (ret is initialized to NULL and only set on a strcmp() match).
 */
40 struct mptcp_sched_ops *mptcp_sched_find(const char *name)
42 struct mptcp_sched_ops *sched, *ret = NULL;
/* RCU-safe traversal; concurrent writers serialize on mptcp_sched_list_lock. */
44 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
45 if (!strcmp(sched->name, name)) {
/*
 * mptcp_register_scheduler - add a scheduler to the global registry.
 * @sched: ops to register; get_subflow is mandatory.
 *
 * Rejects ops without a get_subflow hook and duplicate names (the
 * duplicate check reuses mptcp_sched_find() under the list lock, which
 * is safe: the RCU list walk is also legal for writers holding the lock).
 * Error return values are in elided lines — presumably -EINVAL/-EEXIST;
 * confirm against the full file.
 */
54 int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
56 if (!sched->get_subflow)
59 spin_lock(&mptcp_sched_list_lock);
60 if (mptcp_sched_find(sched->name)) {
/* Name already taken: back out without modifying the list. */
61 spin_unlock(&mptcp_sched_list_lock);
/* RCU-aware insertion so lockless readers see a consistent list. */
64 list_add_tail_rcu(&sched->list, &mptcp_sched_list);
65 spin_unlock(&mptcp_sched_list_lock);
/* NOTE(review): format string lacks a trailing \n; later upstream kernels
 * append it to pr_debug() messages — candidate for a follow-up fix. */
67 pr_debug("%s registered", sched->name);
/*
 * mptcp_unregister_scheduler - remove a scheduler from the registry.
 * @sched: ops previously passed to mptcp_register_scheduler().
 *
 * The built-in default scheduler is refused (early bail-out below), so it
 * remains available for the lifetime of the system.
 */
71 void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
73 if (sched == &mptcp_sched_default)
76 spin_lock(&mptcp_sched_list_lock);
/* RCU deletion: readers already traversing may still see the entry until
 * a grace period elapses. */
77 list_del_rcu(&sched->list);
78 spin_unlock(&mptcp_sched_list_lock);
/* One-time boot setup: make the built-in default scheduler available. */
81 void mptcp_sched_init(void)
83 mptcp_register_scheduler(&mptcp_sched_default);
/*
 * mptcp_init_sched - attach a scheduler to an MPTCP socket.
 * @msk:   socket being configured.
 * @sched: scheduler to use; NULL selects the built-in default.
 *
 * Takes a module reference so the scheduler implementation cannot be
 * unloaded while the socket uses it (dropped in mptcp_release_sched()).
 * The error value when the reference cannot be taken is in an elided
 * line — presumably -EBUSY; confirm against the full file.
 */
86 int mptcp_init_sched(struct mptcp_sock *msk,
87 struct mptcp_sched_ops *sched)
/* No scheduler requested: fall back to the default. */
90 sched = &mptcp_sched_default;
92 if (!bpf_try_module_get(sched, sched->owner))
/* Per-socket init hook. NOTE(review): the call looks unguarded here;
 * presumably an elided `if (msk->sched->init)` check precedes it — confirm. */
97 msk->sched->init(msk);
99 pr_debug("sched=%s", msk->sched->name);
/*
 * mptcp_release_sched - detach the socket's scheduler.
 * @msk: socket being torn down or reconfigured.
 *
 * Drops the module reference taken in mptcp_init_sched(). A release
 * hook call and NULL checks may exist in elided lines — confirm.
 */
104 void mptcp_release_sched(struct mptcp_sock *msk)
106 struct mptcp_sched_ops *sched = msk->sched;
115 bpf_module_put(sched, sched->owner);
/*
 * mptcp_subflow_set_scheduled - mark/unmark a subflow as selected to send.
 * WRITE_ONCE pairs with the lockless READ_ONCE(subflow->scheduled)
 * checks in mptcp_sched_get_send()/mptcp_sched_get_retrans().
 */
118 void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
121 WRITE_ONCE(subflow->scheduled, scheduled);
/*
 * mptcp_sched_get_send - choose subflow(s) for the normal send path.
 * @msk: socket to schedule; caller must hold the msk socket lock
 *       (asserted by msk_owned_by_me()).
 *
 * Dispatches to the socket's scheduler ops, or the built-in default when
 * none is attached. Return-value conventions live in elided lines.
 */
124 int mptcp_sched_get_send(struct mptcp_sock *msk)
126 struct mptcp_subflow_context *subflow;
127 struct mptcp_sched_data data;
129 msk_owned_by_me(msk);
131 /* the following check is moved out of mptcp_subflow_get_send */
132 if (__mptcp_check_fallback(msk)) {
/* Fallback to plain TCP: only the first subflow can carry data, and only
 * if it is writable and has stream memory available. */
134 __tcp_can_send(msk->first) &&
135 sk_stream_memory_free(msk->first)) {
136 mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
/* A subflow already flagged by a previous pass short-circuits scheduling;
 * READ_ONCE pairs with WRITE_ONCE in mptcp_subflow_set_scheduled(). */
142 mptcp_for_each_subflow(msk, subflow) {
143 if (READ_ONCE(subflow->scheduled))
/* Normal send: not a reinjection. */
147 data.reinject = false;
/* Call the default hook directly for the default (or missing) scheduler,
 * bypassing the indirect ops call. */
148 if (msk->sched == &mptcp_sched_default || !msk->sched)
149 return mptcp_sched_default_get_subflow(msk, &data);
150 return msk->sched->get_subflow(msk, &data);
/*
 * mptcp_sched_get_retrans - choose subflow(s) for retransmission.
 * @msk: socket to schedule; caller must hold the msk socket lock
 *       (asserted by msk_owned_by_me()).
 *
 * Mirrors mptcp_sched_get_send() but with data.reinject = true; on TCP
 * fallback it bails out early (retransmit handled by plain TCP).
 */
153 int mptcp_sched_get_retrans(struct mptcp_sock *msk)
155 struct mptcp_subflow_context *subflow;
156 struct mptcp_sched_data data;
158 msk_owned_by_me(msk);
160 /* the following check is moved out of mptcp_subflow_get_retrans */
161 if (__mptcp_check_fallback(msk))
/* A subflow already flagged by a previous pass short-circuits scheduling;
 * READ_ONCE pairs with WRITE_ONCE in mptcp_subflow_set_scheduled(). */
164 mptcp_for_each_subflow(msk, subflow) {
165 if (READ_ONCE(subflow->scheduled))
/* Retransmission path: tell the scheduler this is a reinjection. */
169 data.reinject = true;
170 if (msk->sched == &mptcp_sched_default || !msk->sched)
171 return mptcp_sched_default_get_subflow(msk, &data);
172 return msk->sched->get_subflow(msk, &data);