// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */

#include <linux/bpf.h>
#include <linux/bpf_mprog.h>

static int bpf_mprog_link(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_link *link = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		link = bpf_link_by_id(id_or_fd);
	else if (id_or_fd)
		link = bpf_link_get_from_fd(id_or_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);
	if (type && link->prog->type != type) {
		bpf_link_put(link);
		return -EINVAL;
	}

	tuple->link = link;
	tuple->prog = link->prog;
	return 0;
}

static int bpf_mprog_prog(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_prog *prog = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		prog = bpf_prog_by_id(id_or_fd);
	else if (id_or_fd)
		prog = bpf_prog_get(id_or_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	if (type && prog->type != type) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	tuple->link = NULL;
	tuple->prog = prog;
	return 0;
}

static int bpf_mprog_tuple_relative(struct bpf_tuple *tuple,
				    u32 id_or_fd, u32 flags,
				    enum bpf_prog_type type)
{
	bool link = flags & BPF_F_LINK;
	bool id = flags & BPF_F_ID;

	memset(tuple, 0, sizeof(*tuple));
	if (link)
		return bpf_mprog_link(tuple, id_or_fd, flags, type);
	/* If no relevant flag is set and no id_or_fd was passed, then
	 * tuple link/prog is just NULLed. This is the case when before/
	 * after selects the first/last position without passing an fd;
	 * see also the sketch after bpf_mprog_tuple_put() below.
	 */
	if (!id && !id_or_fd)
		return 0;
	return bpf_mprog_prog(tuple, id_or_fd, flags, type);
}

static void bpf_mprog_tuple_put(struct bpf_tuple *tuple)
{
	if (tuple->link)
		bpf_link_put(tuple->link);
	else if (tuple->prog)
		bpf_prog_put(tuple->prog);
}

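/* Illustrative sketch, not part of the original file: how a caller might
 * resolve a relative target tuple. The fd value, prog type and function
 * name below are made up for the example; on success the tuple holds a
 * reference that must be dropped again via bpf_mprog_tuple_put().
 */
static __maybe_unused int bpf_mprog_tuple_example(void)
{
	struct bpf_tuple rtuple;
	int ret;

	/* "Relative to the program behind fd 42": resolves rtuple.prog. */
	ret = bpf_mprog_tuple_relative(&rtuple, 42, BPF_F_BEFORE,
				       BPF_PROG_TYPE_SCHED_CLS);
	if (ret)
		return ret;
	/* ... position lookup against rtuple would happen here ... */
	bpf_mprog_tuple_put(&rtuple);

	/* Neither fd nor id given: tuple stays NULLed, which before/after
	 * interpret as first/last position.
	 */
	return bpf_mprog_tuple_relative(&rtuple, 0, BPF_F_BEFORE,
					BPF_PROG_TYPE_UNSPEC);
}
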
/* The bpf_mprog_{replace,delete}() functions operate on an exact idx position
 * with the one exception that for deletion we support deleting from front/
 * back. In case of front the idx is -1, in case of back the idx is
 * bpf_mprog_total(entry). Adjustment to the first and last entry is trivial.
 * In bpf_mprog_insert() we have to deal with the following cases (see also
 * the sketch after bpf_mprog_insert() below):
 *
 * idx + before:
 *
 * Insert P4 before P3: idx for the old array is 1, idx for the new array is
 * 2, hence we adjust the target idx for the new array, so that memmove copies
 * P1 and P2 to the new entry, and we insert P4 into idx 2. Inserting
 * before P1 would have old idx -1 and new idx 0.
 *
 *   +--+--+--+        +--+--+--+--+        +--+--+--+--+
 *   |P1|P2|P3|  ==>   |P1|P2|  |P3|  ==>   |P1|P2|P4|P3|
 *   +--+--+--+        +--+--+--+--+        +--+--+--+--+
 *
 * idx + after:
 *
 * Insert P4 after P2: idx for the old array is 2, idx for the new array is 2.
 * Again, memmove copies P1 and P2 to the new entry, and we insert P4
 * into idx 2. Inserting after P3 would have both old/new idx at 4 aka
 * bpf_mprog_total(entry).
 *
 *   +--+--+--+        +--+--+--+--+        +--+--+--+--+
 *   |P1|P2|P3|  ==>   |P1|P2|  |P3|  ==>   |P1|P2|P4|P3|
 *   +--+--+--+        +--+--+--+--+        +--+--+--+--+
 */
static int bpf_mprog_replace(struct bpf_mprog_entry *entry,
			     struct bpf_mprog_entry **entry_new,
			     struct bpf_tuple *ntuple, int idx)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *oprog;

	bpf_mprog_read(entry, idx, &fp, &cp);
	oprog = READ_ONCE(fp->prog);
	bpf_mprog_write(fp, cp, ntuple);
	if (!ntuple->link) {
		WARN_ON_ONCE(cp->link);
		bpf_prog_put(oprog);
	}
	*entry_new = entry;
	return 0;
}

static int bpf_mprog_insert(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *ntuple, int idx, u32 flags)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == total)
		goto insert;
	else if (flags & BPF_F_BEFORE)
		idx += 1;
	bpf_mprog_entry_grow(peer, idx);
insert:
	bpf_mprog_read(peer, idx, &fp, &cp);
	bpf_mprog_write(fp, cp, ntuple);
	bpf_mprog_inc(peer);
	*entry_new = peer;
	return 0;
}

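/* Illustrative sketch, not part of the original file: the idx adjustment
 * from the comment above bpf_mprog_replace() restated as plain arithmetic.
 * Appending at the tail keeps idx as-is, while inserting "before" bumps idx
 * by one so that bpf_mprog_entry_grow() leaves the hole in front of the
 * target element. The helper name is made up.
 */
static __maybe_unused int bpf_mprog_example_new_idx(int idx, int total,
						    u32 flags)
{
	if (idx == total)
		return idx;		/* append at tail */
	if (flags & BPF_F_BEFORE)
		return idx + 1;		/* e.g. P4 before P3: 1 -> 2 */
	return idx;			/* e.g. P4 after P2: stays 2 */
}
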
static int bpf_mprog_delete(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *dtuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_entry_shrink(peer, idx);
	bpf_mprog_dec(peer);
	bpf_mprog_mark_for_release(peer, dtuple);
	*entry_new = peer;
	return 0;
}

/* In bpf_mprog_pos_*() we evaluate the target position for the BPF
 * program/link that needs to be replaced, inserted or deleted for
 * each "rule" independently. If all rules agree on that position
 * or existing element, then enact replacement, addition or deletion.
 * If this is not the case, then the request cannot be satisfied and
 * we bail out with an error. A worked example follows after
 * bpf_mprog_pos_after() below.
 */
static int bpf_mprog_pos_exact(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog))
			return tuple->link == cp->link ? i : -EBUSY;
	}
	return -ENOENT;
}

static int bpf_mprog_pos_before(struct bpf_mprog_entry *entry,
				struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			return i - 1;
	}
	return tuple->prog ? -ENOENT : -1;
}

static int bpf_mprog_pos_after(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			return i + 1;
	}
	return tuple->prog ? -ENOENT : bpf_mprog_total(entry);
}

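/* Worked example, not part of the original file: for an array [P1, P2, P3]
 * with the tuple matching P2 (slot 1), the helpers above return:
 *
 *	bpf_mprog_pos_exact()  -> 1, the slot of P2 itself
 *	bpf_mprog_pos_before() -> 0, target slot in front of P2
 *	bpf_mprog_pos_after()  -> 2, target slot behind P2
 *
 * With an empty tuple (no relative prog/link given), before selects the
 * head (-1) and after selects the tail (bpf_mprog_total(entry)).
 */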
int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, ntuple = {
		.prog = prog_new,
		.link = link,
	}, otuple = {
		.prog = prog_old,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (bpf_mprog_exists(entry, prog_new))
		return -EEXIST;
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd,
				       flags & ~BPF_F_REPLACE,
				       prog_new->type);
	if (ret)
		return ret;
	if (flags & BPF_F_REPLACE) {
		tidx = bpf_mprog_pos_exact(entry, &otuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	if (flags & BPF_F_REPLACE)
		ret = bpf_mprog_replace(entry, entry_new, &ntuple, idx);
	else
		ret = bpf_mprog_insert(entry, entry_new, &ntuple, idx, flags);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

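/* Illustrative usage sketch, not part of the original file: how an attach
 * path such as tcx might drive bpf_mprog_attach(). Entry lookup, locking
 * and RCU publication are caller-specific and only hinted at here; the
 * function name below is made up.
 */
static __maybe_unused int bpf_mprog_example_attach(struct bpf_mprog_entry *entry,
						   struct bpf_prog *prog,
						   u32 relative_fd, u64 revision)
{
	struct bpf_mprog_entry *entry_new;
	int ret;

	/* Insert prog in front of the program behind relative_fd; bails
	 * out with -ESTALE if revision no longer matches the array state.
	 */
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, NULL,
			       BPF_F_BEFORE, relative_fd, revision);
	if (ret)
		return ret;
	/* The caller would publish entry_new (e.g. via rcu_assign_pointer())
	 * and then retire the old entry once readers are done.
	 */
	bpf_mprog_commit(entry);
	return 0;
}
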
static int bpf_mprog_fetch(struct bpf_mprog_entry *entry,
			   struct bpf_tuple *tuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_cp *cp;
	struct bpf_mprog_fp *fp;
	struct bpf_prog *prog;
	struct bpf_link *link;

	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_read(entry, idx, &fp, &cp);
	prog = READ_ONCE(fp->prog);
	link = cp->link;
	/* The deletion request can either be without a filled tuple, in which
	 * case it gets populated here based on idx, or with a filled tuple
	 * where the only thing we end up doing is the WARN_ON_ONCE() assert.
	 * If we hit a BPF link at the given index, it must not be removed
	 * from the opts path.
	 */
	if (link && !tuple->link)
		return -EBUSY;
	WARN_ON_ONCE(tuple->prog && tuple->prog != prog);
	WARN_ON_ONCE(tuple->link && tuple->link != link);
	tuple->prog = prog;
	tuple->link = link;
	return 0;
}

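/* Worked example, not part of the original file: a detach request that only
 * names a position, e.g. "delete from the back" via BPF_F_AFTER with no fd,
 * arrives here with an empty tuple, which bpf_mprog_fetch() then fills from
 * the selected slot. If that slot is owned by a BPF link, the position-only
 * deletion is refused with -EBUSY, since links must be detached through the
 * link path rather than the opts path.
 */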
int bpf_mprog_detach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog, struct bpf_link *link,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, dtuple = {
		.prog = prog,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (flags & BPF_F_REPLACE)
		return -EINVAL;
	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (!bpf_mprog_total(entry))
		return -ENOENT;
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd, flags,
				       prog ? prog->type :
				       BPF_PROG_TYPE_UNSPEC);
	if (ret)
		return ret;
	if (dtuple.prog) {
		tidx = bpf_mprog_pos_exact(entry, &dtuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	ret = bpf_mprog_fetch(entry, &dtuple, idx);
	if (ret)
		goto out;
	ret = bpf_mprog_delete(entry, entry_new, &dtuple, idx);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry)
{
	u32 __user *uprog_flags, *ulink_flags;
	u32 __user *uprog_id, *ulink_id;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *prog;
	const u32 flags = 0;
	u32 id, count = 0;
	u64 revision;
	int i, ret = 0;

	if (attr->query.query_flags || attr->query.attach_flags)
		return -EINVAL;
	revision = bpf_mprog_revision(entry);
	count = bpf_mprog_total(entry);
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.count, &count, sizeof(count)))
		return -EFAULT;
	uprog_id = u64_to_user_ptr(attr->query.prog_ids);
	uprog_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	ulink_id = u64_to_user_ptr(attr->query.link_ids);
	ulink_flags = u64_to_user_ptr(attr->query.link_attach_flags);
	if (attr->query.count == 0 || !uprog_id || !count)
		return 0;
	if (attr->query.count < count) {
		count = attr->query.count;
		ret = -ENOSPC;
	}
	for (i = 0; i < bpf_mprog_max(); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		prog = READ_ONCE(fp->prog);
		if (!prog)
			break;
		id = prog->aux->id;
		if (copy_to_user(uprog_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (uprog_flags &&
		    copy_to_user(uprog_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		id = cp->link ? cp->link->id : 0;
		if (ulink_id &&
		    copy_to_user(ulink_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (ulink_flags &&
		    copy_to_user(ulink_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		if (i + 1 == count)
			break;
	}
	return ret;
}