// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>
14 /* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
15 extern struct bpf_struct_ops bpf_tcp_congestion_ops;

/* ops that a bpf-tcp-cc is not allowed to implement; see bpf_tcp_ca_check_member() */
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}
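
/*
 * The promotion above is what lets a bpf-tcp-cc treat the "struct sock *"
 * argument of an op as a "struct tcp_sock *" with plain pointer accesses.
 * A minimal sketch of the BPF side, assuming libbpf's SEC()/BPF_PROG()
 * conventions; the op name is made up for the example:
 *
 *	SEC("struct_ops/example_init")
 *	void BPF_PROG(example_init, struct sock *sk)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		__u32 cwnd = tp->snd_cwnd;	// direct PTR_TO_BTF_ID read
 *	}
 */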

static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

/* Return the byte offset in tcp_congestion_ops of the op this prog attaches to. */
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = bpf_tcp_congestion_ops.type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
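
/*
 * Sketch of what the proto table above enables on the BPF side:
 * per-socket CA state via sk storage.  The map name is made up for the
 * example; bpf_sk_storage_get() is usable here only because its proto
 * is returned above:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} ca_state SEC(".maps");
 *
 *	__u64 *st = bpf_sk_storage_get(&ca_state, sk, NULL,
 *				       BPF_SK_STORAGE_GET_F_CREATE);
 */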

BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
};

static int __init bpf_tcp_ca_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);
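
/*
 * For reference, a minimal sketch of the BPF object that attaches to the
 * struct_ops registered above.  It is built with libbpf (not part of this
 * file); the "bpf_reno" names are made up for the example.  It fills the
 * three ops tcp_validate_congestion_control() insists on (ssthresh,
 * undo_cwnd, and cong_avoid or cong_control) by calling the tcp_reno_*
 * kfuncs allowlisted in bpf_tcp_ca_check_kfunc_ids:
 *
 *	extern __u32 tcp_reno_ssthresh(struct sock *sk) __ksym;
 *	extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack,
 *					__u32 acked) __ksym;
 *	extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
 *
 *	SEC("struct_ops/bpf_reno_ssthresh")
 *	__u32 BPF_PROG(bpf_reno_ssthresh, struct sock *sk)
 *	{
 *		return tcp_reno_ssthresh(sk);
 *	}
 *
 *	SEC("struct_ops/bpf_reno_cong_avoid")
 *	void BPF_PROG(bpf_reno_cong_avoid, struct sock *sk, __u32 ack,
 *		      __u32 acked)
 *	{
 *		tcp_reno_cong_avoid(sk, ack, acked);
 *	}
 *
 *	SEC("struct_ops/bpf_reno_undo_cwnd")
 *	__u32 BPF_PROG(bpf_reno_undo_cwnd, struct sock *sk)
 *	{
 *		return tcp_reno_undo_cwnd(sk);
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_reno = {
 *		.ssthresh	= (void *)bpf_reno_ssthresh,
 *		.cong_avoid	= (void *)bpf_reno_cong_avoid,
 *		.undo_cwnd	= (void *)bpf_reno_undo_cwnd,
 *		.name		= "bpf_reno",
 *	};
 */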