// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

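/* ftrace ops_func hook: lets ftrace coordinate this trampoline with
 * other ftrace_ops that set IPMODIFY (e.g. livepatch) on the same
 * function. When sharing is required, the trampoline switches to
 * BPF_TRAMP_F_ORIG_STACK mode so that both users can coexist.
 */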
static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
        struct bpf_trampoline *tr = ops->private;
        int ret = 0;

        if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
                /* This is called inside register_ftrace_direct_multi(), so
                 * tr->mutex is already locked.
                 */
                lockdep_assert_held_once(&tr->mutex);

                /* Instead of updating the trampoline here, we propagate
                 * -EAGAIN to register_ftrace_direct_multi(). Then we can
                 * retry register_ftrace_direct_multi() after updating the
                 * trampoline.
                 */
                if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
                    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
                        if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
                                return -EBUSY;

                        tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
                        return -EAGAIN;
                }

                return 0;
        }

        /* The normal locking order is
         *    tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
         *
         * The following two commands are called from
         *
         *   prepare_direct_functions_for_ipmodify
         *   cleanup_direct_functions_after_ipmodify
         *
         * In both cases, direct_mutex is already locked. Use
         * mutex_trylock(&tr->mutex) to avoid deadlock in a race condition
         * (something else is making changes to this same trampoline).
         */
        if (!mutex_trylock(&tr->mutex)) {
                /* sleep 1 ms to make sure whatever is holding tr->mutex makes
                 * some progress.
                 */
                msleep(1);
                return -EAGAIN;
        }

        switch (cmd) {
        case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
                tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;

                if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
                    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
                        ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
                break;
        case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
                tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;

                if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
                        ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        mutex_unlock(&tr->mutex);
        return ret;
}
#endif

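/* Return true if @prog attaches through a bpf trampoline, i.e. it is a
 * tracing fentry/fexit/fmod_ret program or an LSM MAC program.
 */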
bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
        enum bpf_attach_type eatype = prog->expected_attach_type;
        enum bpf_prog_type ptype = prog->type;

        return (ptype == BPF_PROG_TYPE_TRACING &&
                (eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
                 eatype == BPF_MODIFY_RETURN)) ||
                (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

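/* Publish the trampoline page as a kallsyms symbol and emit a perf
 * KSYMBOL event, so the image shows up in stack traces and profiles.
 */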
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
        ksym->start = (unsigned long) data;
        ksym->end = ksym->start + PAGE_SIZE;
        bpf_ksym_add(ksym);
        perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
                           PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
        bpf_ksym_del(ksym);
        perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
                           PAGE_SIZE, true, ksym->name);
}

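/* Find the trampoline for @key in trampoline_table, or allocate a new
 * one and insert it. Takes a reference on the returned trampoline;
 * returns NULL if allocation fails.
 */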
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
        struct bpf_trampoline *tr;
        struct hlist_head *head;
        int i;

        mutex_lock(&trampoline_mutex);
        head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
        hlist_for_each_entry(tr, head, hlist) {
                if (tr->key == key) {
                        refcount_inc(&tr->refcnt);
                        goto out;
                }
        }
        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
        if (!tr)
                goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
        if (!tr->fops) {
                kfree(tr);
                tr = NULL;
                goto out;
        }
        tr->fops->private = tr;
        tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

        tr->key = key;
        INIT_HLIST_NODE(&tr->hlist);
        hlist_add_head(&tr->hlist, head);
        refcount_set(&tr->refcnt, 1);
        mutex_init(&tr->mutex);
        for (i = 0; i < BPF_TRAMP_MAX; i++)
                INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
        mutex_unlock(&trampoline_mutex);
        return tr;
}

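/* Pin the module that contains tr->func.addr (if any) so its text
 * cannot go away while the trampoline is patched into it.
 */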
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
        struct module *mod;
        int err = 0;

        preempt_disable();
        mod = __module_text_address((unsigned long) tr->func.addr);
        if (mod && !try_module_get(mod))
                err = -ENOENT;
        preempt_enable();
        tr->mod = mod;
        return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
        module_put(tr->mod);
        tr->mod = NULL;
}

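/* The three helpers below detach, re-point and attach the trampoline at
 * the fentry site: through the ftrace direct-call API when the site is
 * ftrace-managed, otherwise by text-poking the call instruction directly.
 */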
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
        void *ip = tr->func.addr;
        int ret;

        if (tr->func.ftrace_managed)
                ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

        if (!ret)
                bpf_trampoline_module_put(tr);
        return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
                         bool lock_direct_mutex)
{
        void *ip = tr->func.addr;
        int ret;

        if (tr->func.ftrace_managed) {
                if (lock_direct_mutex)
                        ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr);
                else
                        ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr);
        } else {
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
        }
        return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
        void *ip = tr->func.addr;
        unsigned long faddr;
        int ret;

        faddr = ftrace_location((unsigned long)ip);
        if (faddr) {
                if (!tr->fops)
                        return -ENOTSUPP;
                tr->func.ftrace_managed = true;
        }

        if (bpf_trampoline_module_get(tr))
                return -ENOENT;

        if (tr->func.ftrace_managed) {
                ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
                ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
        } else {
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
        }

        if (ret)
                bpf_trampoline_module_put(tr);
        return ret;
}

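/* Snapshot the links attached to @tr into a kcalloc'ed array of
 * bpf_tramp_links, one entry per BPF_TRAMP_* kind. Sets *total to the
 * overall number of links and *ip_arg if any prog needs the IP argument.
 * The caller must kfree() the result.
 */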
static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
        struct bpf_tramp_link *link;
        struct bpf_tramp_links *tlinks;
        struct bpf_tramp_link **links;
        int kind;

        *total = 0;
        tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
        if (!tlinks)
                return ERR_PTR(-ENOMEM);

        for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
                tlinks[kind].nr_links = tr->progs_cnt[kind];
                *total += tr->progs_cnt[kind];
                links = tlinks[kind].links;

                hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
                        *ip_arg |= link->link.prog->call_get_func_ip;
                        *links++ = link;
                }
        }
        return tlinks;
}

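/* Final step of image teardown, bounced to a workqueue so that it runs
 * in sleepable process context rather than from the RCU callback.
 */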
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
        struct bpf_tramp_image *im;

        im = container_of(work, struct bpf_tramp_image, work);
        bpf_image_ksym_del(&im->ksym);
        bpf_jit_free_exec(im->image);
        bpf_jit_uncharge_modmem(PAGE_SIZE);
        percpu_ref_exit(&im->pcref);
        kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
        struct bpf_tramp_image *im;

        im = container_of(rcu, struct bpf_tramp_image, rcu);
        INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
        schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
        struct bpf_tramp_image *im;

        im = container_of(pcref, struct bpf_tramp_image, pcref);
        call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
        struct bpf_tramp_image *im;

        im = container_of(rcu, struct bpf_tramp_image, rcu);
        if (im->ip_after_call)
                /* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
                percpu_ref_kill(&im->pcref);
        else
                /* the case of fentry trampoline */
                call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
        /* The trampoline image that calls the original function is using:
         * rcu_read_lock_trace to protect sleepable bpf progs
         * rcu_read_lock to protect normal bpf progs
         * percpu_ref to protect trampoline itself
         * rcu tasks to protect trampoline asm not covered by percpu_ref
         * (which are a few asm insns before __bpf_tramp_enter and
         *  after __bpf_tramp_exit)
         *
         * The trampoline is unreachable before bpf_tramp_image_put().
         *
         * First, patch the trampoline to avoid calling into fexit progs.
         * The progs will be freed even if the original function is still
         * executing or sleeping.
         * In case of CONFIG_PREEMPTION=y use call_rcu_tasks() to wait for
         * the first few asm instructions to execute and call into
         * __bpf_tramp_enter->percpu_ref_get.
         * Then use percpu_ref_kill to wait for the trampoline and the original
         * function to finish.
         * Then use call_rcu_tasks() to make sure the few asm insns in
         * the trampoline epilogue are done as well.
         *
         * In the !PREEMPTION case, a task interrupted in the first few asm
         * insns won't go through an RCU quiescent state that
         * percpu_ref_kill would be waiting for. Hence the first
         * call_rcu_tasks() is not necessary.
         */
        if (im->ip_after_call) {
                int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
                                             NULL, im->ip_epilogue);
                WARN_ON(err);
                if (IS_ENABLED(CONFIG_PREEMPTION))
                        call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
                else
                        percpu_ref_kill(&im->pcref);
                return;
        }

        /* A trampoline without fexit and fmod_ret progs doesn't call the
         * original function and doesn't use percpu_ref.
         * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
         * Then use call_rcu_tasks() to wait for the rest of trampoline asm
         * and normal progs.
         */
        call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

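/* Allocate a one-page executable image for the trampoline together with
 * its percpu_ref and kallsyms entry ("bpf_trampoline_<key>").
 */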
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
{
        struct bpf_tramp_image *im;
        struct bpf_ksym *ksym;
        void *image;
        int err = -ENOMEM;

        im = kzalloc(sizeof(*im), GFP_KERNEL);
        if (!im)
                goto out;

        err = bpf_jit_charge_modmem(PAGE_SIZE);
        if (err)
                goto out_free_im;

        err = -ENOMEM;
        im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
        if (!image)
                goto out_uncharge;
        set_vm_flush_reset_perms(image);

        err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
        if (err)
                goto out_free_image;

        ksym = &im->ksym;
        INIT_LIST_HEAD_RCU(&ksym->lnode);
        snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
        bpf_image_ksym_add(image, ksym);
        return im;

out_free_image:
        bpf_jit_free_exec(im->image);
out_uncharge:
        bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
        kfree(im);
out:
        return ERR_PTR(err);
}

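/* Regenerate the trampoline image for the current set of attached progs
 * and swap it in at the fentry site. With no progs left, the trampoline
 * is unregistered instead. Called with tr->mutex held.
 */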
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{
        struct bpf_tramp_image *im;
        struct bpf_tramp_links *tlinks;
        u32 orig_flags = tr->flags;
        bool ip_arg = false;
        int err, total;

        tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
        if (IS_ERR(tlinks))
                return PTR_ERR(tlinks);

        if (total == 0) {
                err = unregister_fentry(tr, tr->cur_image->image);
                bpf_tramp_image_put(tr->cur_image);
                tr->cur_image = NULL;
                goto out;
        }

        im = bpf_tramp_image_alloc(tr->key);
        if (IS_ERR(im)) {
                err = PTR_ERR(im);
                goto out;
        }

        /* clear all bits except SHARE_IPMODIFY */
        tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;

        if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
            tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
                /* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
                 * should not be set together.
                 */
                tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
        } else {
                tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
        }

        if (ip_arg)
                tr->flags |= BPF_TRAMP_F_IP_ARG;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
again:
        if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
            (tr->flags & BPF_TRAMP_F_CALL_ORIG))
                tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif

        err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
                                          &tr->func.model, tr->flags, tlinks,
                                          tr->func.addr);
        if (err < 0)
                goto out;

        set_memory_ro((long)im->image, 1);
        set_memory_x((long)im->image, 1);

        WARN_ON(tr->cur_image && total == 0);
        if (tr->cur_image)
                /* progs already running at this address */
                err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
        else
                /* first time registering */
                err = register_fentry(tr, im->image);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        if (err == -EAGAIN) {
                /* -EAGAIN from bpf_tramp_ftrace_ops_func. Now that
                 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
                 * trampoline again and retry the registration.
                 */
                /* reset fops->func and fops->trampoline for re-register */
                tr->fops->func = NULL;
                tr->fops->trampoline = 0;

                /* reset im->image memory attr for arch_prepare_bpf_trampoline */
                set_memory_nx((long)im->image, 1);
                set_memory_rw((long)im->image, 1);
                goto again;
        }
#endif
        if (err)
                goto out;

        if (tr->cur_image)
                bpf_tramp_image_put(tr->cur_image);
        tr->cur_image = im;
out:
        /* If any error happens, restore previous flags */
        if (err)
                tr->flags = orig_flags;
        kfree(tlinks);
        return err;
}

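/* Map a prog's expected_attach_type to the BPF_TRAMP_* slot it occupies
 * on the trampoline.
 */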
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
        switch (prog->expected_attach_type) {
        case BPF_TRACE_FENTRY:
                return BPF_TRAMP_FENTRY;
        case BPF_MODIFY_RETURN:
                return BPF_TRAMP_MODIFY_RETURN;
        case BPF_TRACE_FEXIT:
                return BPF_TRAMP_FEXIT;
        case BPF_LSM_MAC:
                if (!prog->aux->attach_func_proto->type)
                        /* The function returns void, we cannot modify its
                         * return value.
                         */
                        return BPF_TRAMP_FEXIT;
                else
                        return BPF_TRAMP_MODIFY_RETURN;
        default:
                return BPF_TRAMP_REPLACE;
        }
}

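/* Link @link's prog into the trampoline's hlist for its kind and
 * regenerate the trampoline. Must be called with tr->mutex held.
 */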
static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
        enum bpf_tramp_prog_type kind;
        struct bpf_tramp_link *link_exiting;
        int err = 0;
        int cnt = 0, i;

        kind = bpf_attach_type_to_tramp(link->link.prog);
        if (tr->extension_prog)
                /* cannot attach fentry/fexit if extension prog is attached.
                 * cannot overwrite extension prog either.
                 */
                return -EBUSY;

        for (i = 0; i < BPF_TRAMP_MAX; i++)
                cnt += tr->progs_cnt[i];

        if (kind == BPF_TRAMP_REPLACE) {
                /* Cannot attach extension if fentry/fexit are in use. */
                if (cnt)
                        return -EBUSY;
                tr->extension_prog = link->link.prog;
                return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
                                          link->link.prog->bpf_func);
        }
        if (cnt >= BPF_MAX_TRAMP_LINKS)
                return -E2BIG;
        if (!hlist_unhashed(&link->tramp_hlist))
                /* prog already linked */
                return -EBUSY;
        hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
                if (link_exiting->link.prog != link->link.prog)
                        continue;
                /* prog already linked */
                return -EBUSY;
        }

        hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
        tr->progs_cnt[kind]++;
        err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
        if (err) {
                hlist_del_init(&link->tramp_hlist);
                tr->progs_cnt[kind]--;
        }
        return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
        int err;

        mutex_lock(&tr->mutex);
        err = __bpf_trampoline_link_prog(link, tr);
        mutex_unlock(&tr->mutex);
        return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
        enum bpf_tramp_prog_type kind;
        int err;

        kind = bpf_attach_type_to_tramp(link->link.prog);
        if (kind == BPF_TRAMP_REPLACE) {
                WARN_ON_ONCE(!tr->extension_prog);
                err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
                                         tr->extension_prog->bpf_func, NULL);
                tr->extension_prog = NULL;
                return err;
        }
        hlist_del_init(&link->tramp_hlist);
        tr->progs_cnt[kind]--;
        return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
        int err;

        mutex_lock(&tr->mutex);
        err = __bpf_trampoline_unlink_prog(link, tr);
        mutex_unlock(&tr->mutex);
        return err;
}

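/* BPF_LSM_CGROUP support: a per-LSM-hook "shim" program attached via a
 * trampoline dispatches to the cgroup's actual BPF_LSM_CGROUP programs.
 */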
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
        struct bpf_shim_tramp_link *shim_link =
                container_of(link, struct bpf_shim_tramp_link, link.link);

        /* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
        if (!shim_link->trampoline)
                return;

        WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
        bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
        struct bpf_shim_tramp_link *shim_link =
                container_of(link, struct bpf_shim_tramp_link, link.link);

        kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
        .release = bpf_shim_tramp_link_release,
        .dealloc = bpf_shim_tramp_link_dealloc,
};

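/* Allocate a stub bpf_prog that carries @prog's attach metadata but whose
 * body is the shim function @bpf_func, wrapped in a bpf_shim_tramp_link.
 */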
static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
                                                     bpf_func_t bpf_func,
                                                     int cgroup_atype)
{
        struct bpf_shim_tramp_link *shim_link = NULL;
        struct bpf_prog *p;

        shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
        if (!shim_link)
                return NULL;

        p = bpf_prog_alloc(1, 0);
        if (!p) {
                kfree(shim_link);
                return NULL;
        }

        p->jited = false;
        p->bpf_func = bpf_func;

        p->aux->cgroup_atype = cgroup_atype;
        p->aux->attach_func_proto = prog->aux->attach_func_proto;
        p->aux->attach_btf_id = prog->aux->attach_btf_id;
        p->aux->attach_btf = prog->aux->attach_btf;
        btf_get(p->aux->attach_btf);
        p->type = BPF_PROG_TYPE_LSM;
        p->expected_attach_type = BPF_LSM_MAC;
        bpf_prog_inc(p);
        bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
                      &bpf_shim_tramp_link_lops, p);
        bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

        return shim_link;
}

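/* Find an already-installed shim link on @tr whose stub prog uses
 * @bpf_func, if any. Called with tr->mutex held.
 */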
static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
                                                    bpf_func_t bpf_func)
{
        struct bpf_tramp_link *link;
        int kind;

        for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
                hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
                        struct bpf_prog *p = link->link.prog;

                        if (p->bpf_func == bpf_func)
                                return container_of(link, struct bpf_shim_tramp_link, link);
                }
        }

        return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
                                    int cgroup_atype)
{
        struct bpf_shim_tramp_link *shim_link = NULL;
        struct bpf_attach_target_info tgt_info = {};
        struct bpf_trampoline *tr;
        bpf_func_t bpf_func;
        u64 key;
        int err;

        err = bpf_check_attach_target(NULL, prog, NULL,
                                      prog->aux->attach_btf_id,
                                      &tgt_info);
        if (err)
                return err;

        key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
                                         prog->aux->attach_btf_id);

        bpf_lsm_find_cgroup_shim(prog, &bpf_func);
        tr = bpf_trampoline_get(key, &tgt_info);
        if (!tr)
                return -ENOMEM;

        mutex_lock(&tr->mutex);

        shim_link = cgroup_shim_find(tr, bpf_func);
        if (shim_link) {
                /* Reusing an existing shim attached by another program. */
                bpf_link_inc(&shim_link->link.link);

                mutex_unlock(&tr->mutex);
                bpf_trampoline_put(tr); /* bpf_trampoline_get above */
                return 0;
        }

        /* Allocate and install a new shim. */

        shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
        if (!shim_link) {
                err = -ENOMEM;
                goto err;
        }

        err = __bpf_trampoline_link_prog(&shim_link->link, tr);
        if (err)
                goto err;

        shim_link->trampoline = tr;
        /* note, we're still holding tr refcnt from above */

        mutex_unlock(&tr->mutex);

        return 0;
err:
        mutex_unlock(&tr->mutex);

        if (shim_link)
                bpf_link_put(&shim_link->link.link);

        /* have to release tr while _not_ holding its mutex */
        bpf_trampoline_put(tr); /* bpf_trampoline_get above */

        return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
        struct bpf_shim_tramp_link *shim_link = NULL;
        struct bpf_trampoline *tr;
        bpf_func_t bpf_func;
        u64 key;

        key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
                                         prog->aux->attach_btf_id);

        bpf_lsm_find_cgroup_shim(prog, &bpf_func);
        tr = bpf_trampoline_lookup(key);
        if (WARN_ON_ONCE(!tr))
                return;

        mutex_lock(&tr->mutex);
        shim_link = cgroup_shim_find(tr, bpf_func);
        mutex_unlock(&tr->mutex);

        if (shim_link)
                bpf_link_put(&shim_link->link.link);

        bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

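/* Get-or-create the trampoline for @key, filling in the function model
 * and target address from @tgt_info on first use. Takes a reference;
 * drop it with bpf_trampoline_put().
 */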
struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info)
{
        struct bpf_trampoline *tr;

        tr = bpf_trampoline_lookup(key);
        if (!tr)
                return NULL;

        mutex_lock(&tr->mutex);
        if (tr->func.addr)
                goto out;

        memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
        tr->func.addr = (void *)tgt_info->tgt_addr;
out:
        mutex_unlock(&tr->mutex);
        return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
        int i;

        if (!tr)
                return;
        mutex_lock(&trampoline_mutex);
        if (!refcount_dec_and_test(&tr->refcnt))
                goto out;
        WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

        for (i = 0; i < BPF_TRAMP_MAX; i++)
                if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
                        goto out;

        /* This code will be executed even when the last bpf_tramp_image
         * is alive. All progs are detached from the trampoline and the
         * trampoline image is patched with jmp into epilogue to skip
         * fexit progs. The fentry-only trampoline will be freed via
         * multiple rcu callbacks.
         */
        hlist_del(&tr->hlist);
        if (tr->fops) {
                ftrace_free_filter(tr->fops);
                kfree(tr->fops);
        }
        kfree(tr);
out:
        mutex_unlock(&trampoline_mutex);
}

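/* Start timestamp for runtime stats. NO_START_TIME (1) means "run the
 * prog but don't record its execution time" (stats disabled, or
 * sched_clock() happened to return 0).
 */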
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
        u64 start = NO_START_TIME;

        if (static_branch_unlikely(&bpf_stats_enabled_key)) {
                start = sched_clock();
                if (unlikely(!start))
                        start = NO_START_TIME;
        }
        return start;
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is the start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
{
        rcu_read_lock();
        migrate_disable();

        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                bpf_prog_inc_misses_counter(prog);
                return 0;
        }
        return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
                                      u64 start)
{
        struct bpf_prog_stats *stats;

        if (static_branch_unlikely(&bpf_stats_enabled_key) &&
            /* static_key could be enabled in __bpf_prog_enter*
             * and disabled in __bpf_prog_exit*.
             * And vice versa.
             * Hence check that 'start' is valid.
             */
            start > NO_START_TIME) {
                unsigned long flags;

                stats = this_cpu_ptr(prog->stats);
                flags = u64_stats_update_begin_irqsave(&stats->syncp);
                u64_stats_inc(&stats->cnt);
                u64_stats_add(&stats->nsecs, sched_clock() - start);
                u64_stats_update_end_irqrestore(&stats->syncp, flags);
        }
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
{
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);

        update_prog_stats(prog, start);
        this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
                                        struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
{
        /* Runtime stats are exported via actual BPF_LSM_CGROUP
         * programs, not the shims.
         */
        rcu_read_lock();
        migrate_disable();

        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

        return NO_START_TIME;
}

void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
                                        struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
{
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);

        migrate_enable();
        rcu_read_unlock();
}

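/* Sleepable progs are protected by rcu_read_lock_trace() instead of
 * rcu_read_lock(), so they may fault and sleep while running.
 */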
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
        rcu_read_lock_trace();
        migrate_disable();
        might_fault();

        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                bpf_prog_inc_misses_counter(prog);
                return 0;
        }

        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

        return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
                                       struct bpf_tramp_run_ctx *run_ctx)
{
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);

        update_prog_stats(prog, start);
        this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock_trace();
}

u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
                                        struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
{
        rcu_read_lock();
        migrate_disable();

        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

        return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
                                        struct bpf_tramp_run_ctx *run_ctx)
        __releases(RCU)
{
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);

        update_prog_stats(prog, start);
        migrate_enable();
        rcu_read_unlock();
}

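/* Pin/unpin the trampoline image around the trampoline body; image
 * teardown waits for this ref to drain before freeing the page.
 */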
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
        percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
        percpu_ref_put(&tr->pcref);
}

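/* Weak stub: architectures that support bpf trampolines override this
 * in their JIT; everyone else gets -ENOTSUPP.
 */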
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                            const struct btf_func_model *m, u32 flags,
                            struct bpf_tramp_links *tlinks,
                            void *orig_call)
{
        return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
        int i;

        for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&trampoline_table[i]);
        return 0;
}
late_initcall(init_trampolines);