// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>
#include "mmap_unlock_work.h"

static const char * const iter_task_type_names[] = {
	"ALL",
	"TID",
	"PID",
};

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
	enum bpf_iter_task_type type;
	u32 pid;
	u32 pid_visiting;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_common *common,
						   u32 *tid,
						   bool skip_if_dup_files)
{
	struct task_struct *task, *next_task;
	struct pid *pid;
	u32 saved_tid;

	if (!*tid) {
		/* The first time, the iterator calls this function. */
		pid = find_pid_ns(common->pid, common->ns);
		if (!pid)
			return NULL;

		task = get_pid_task(pid, PIDTYPE_TGID);
		if (!task)
			return NULL;

		*tid = common->pid;
		common->pid_visiting = common->pid;

		return task;
	}

	/* If control returns to user space and comes back to the
	 * kernel again, *tid and common->pid_visiting should be the
	 * same for task_seq_start() to pick up the correct task.
	 */
	if (*tid == common->pid_visiting) {
		pid = find_pid_ns(common->pid_visiting, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);

		return task;
	}

	pid = find_pid_ns(common->pid_visiting, common->ns);
	if (!pid)
		return NULL;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return NULL;

retry:
	if (!pid_alive(task)) {
		put_task_struct(task);
		return NULL;
	}

	next_task = next_thread(task);
	put_task_struct(task);
	if (!next_task)
		return NULL;

	saved_tid = *tid;
	*tid = __task_pid_nr_ns(next_task, PIDTYPE_PID, common->ns);
	if (!*tid || *tid == common->pid) {
		/* Ran out of tasks of a process. The tasks of a
		 * thread_group are linked as a circular linked list.
		 */
		*tid = saved_tid;
		return NULL;
	}

	get_task_struct(next_task);
	common->pid_visiting = *tid;

	if (skip_if_dup_files && task->files == task->group_leader->files) {
		task = next_task;
		goto retry;
	}

	return next_task;
}

static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	if (common->type == BPF_TASK_ITER_TID) {
		if (*tid && *tid != common->pid)
			return NULL;
		rcu_read_lock();
		pid = find_pid_ns(common->pid, common->ns);
		if (pid) {
			task = get_pid_task(pid, PIDTYPE_TGID);
			*tid = common->pid;
		}
		rcu_read_unlock();

		return task;
	}

	if (common->type == BPF_TASK_ITER_TGID) {
		rcu_read_lock();
		task = task_group_seq_get_next(common, tid, skip_if_dup_files);
		rcu_read_unlock();

		return task;
	}

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, common->ns);
	if (pid) {
		*tid = pid_nr_ns(pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && !thread_group_leader(task) &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

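/* For reference, a BPF program consuming this iterator could look like the
 * minimal sketch below (illustrative, not part of this file; the program
 * name is arbitrary, and the ctx layout mirrors struct bpf_iter__task above):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */
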
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

static int bpf_iter_attach_task(struct bpf_prog *prog,
				union bpf_iter_link_info *linfo,
				struct bpf_iter_aux_info *aux)
{
	unsigned int flags;
	struct pid *pid;
	pid_t tgid;

	if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1)
		return -EINVAL;

	aux->task.type = BPF_TASK_ITER_ALL;
	if (linfo->task.tid != 0) {
		aux->task.type = BPF_TASK_ITER_TID;
		aux->task.pid = linfo->task.tid;
	}
	if (linfo->task.pid != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;
		aux->task.pid = linfo->task.pid;
	}
	if (linfo->task.pid_fd != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;

		pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		tgid = pid_nr_ns(pid, task_active_pid_ns(current));
		aux->task.pid = tgid;
		put_pid(pid);
	}

	return 0;
}

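/* From user space, the iterator is parameterized by filling in
 * bpf_iter_link_info at link-creation time. A libbpf-based sketch
 * (the pid value is illustrative):
 *
 *	union bpf_iter_link_info linfo;
 *	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 *
 *	memset(&linfo, 0, sizeof(linfo));
 *	linfo.task.pid = getpid();	// visit every thread of this process
 *	opts.link_info = &linfo;
 *	opts.link_info_len = sizeof(linfo);
 *	link = bpf_program__attach_iter(prog, &opts);
 *
 * As enforced above, at most one of tid, pid, and pid_fd may be set;
 * leaving all three zero iterates over every task in the namespace.
 */
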
static const struct seq_operations task_seq_ops = {
	.start	= task_seq_start,
	.next	= task_seq_next,
	.stop	= task_seq_stop,
	.show	= task_seq_show,
};

struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	u32 tid;
	u32 fd;
};

static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	u32 saved_tid = info->tid;
	struct task_struct *curr_task;
	unsigned int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it holds a reference to the task/file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->task = NULL;
			return NULL;
		}

		/* set info->task */
		info->task = curr_task;
		if (saved_tid == info->tid)
			curr_fd = info->fd;
		else
			curr_fd = 0;
	}

	rcu_read_lock();
	for (;; curr_fd++) {
		struct file *f;
		f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
		if (!f)
			break;
		if (!get_file_rcu(f))
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_task_struct(curr_task);

	if (info->common.type == BPF_TASK_ITER_TID) {
		info->task = NULL;
		return NULL;
	}

	info->task = NULL;
	info->fd = 0;
	saved_tid = ++(info->tid);
	goto again;
}

static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

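/* A minimal BPF-side sketch for the task_file iterator (illustrative; the
 * ctx layout mirrors struct bpf_iter__task_file above):
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		struct file *file = ctx->file;
 *
 *		if (task && file)
 *			BPF_SEQ_PRINTF(seq, "%8d %8d %lx\n",
 *				       task->tgid, ctx->fd,
 *				       (long)file->f_op);
 *		return 0;
 *	}
 */
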
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	common->type = aux->task.type;
	common->pid = aux->task.pid;

	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start	= task_file_seq_start,
	.next	= task_file_seq_next,
	.stop	= task_file_seq_stop,
	.show	= task_file_seq_show,
};

struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u32 tid;
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};

enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma,   /* use find_vma() with addr 0 */
	task_vma_iter_next_vma,    /* use vma_next() with curr_vma */
	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
};

static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{
	enum bpf_task_vma_iter_find_op op;
	struct vm_area_struct *curr_vma;
	struct task_struct *curr_task;
	struct mm_struct *curr_mm;
	u32 saved_tid = info->tid;

	/* If this function returns a non-NULL vma, it holds a reference to
	 * the task_struct, holds a refcount on mm->mm_users, and holds a
	 * read lock on vma->mm->mmap_lock.
	 * If this function returns NULL, it does not hold any reference or
	 * lock.
	 */
	if (info->task) {
		curr_task = info->task;
		curr_vma = info->vma;
		curr_mm = info->mm;
		/* In case of lock contention, drop mmap_lock to unblock
		 * the writer.
		 *
		 * After relock, call find_vma(mm, prev_vm_end - 1) to find
		 * the new vma to process.
		 *
		 *   +------+------+-----------+
		 *   | VMA1 | VMA2 | VMA3      |
		 *   +------+------+-----------+
		 *   |      |      |           |
		 *  4k     8k     16k         400k
		 *
		 * For example, curr_vma == VMA2. Before unlock, we set
		 *
		 *    prev_vm_start = 8k
		 *    prev_vm_end   = 16k
		 *
		 * There are a few cases:
		 *
		 * 1) VMA2 is freed, but VMA3 exists.
		 *
		 *    find_vma() will return VMA3, just process VMA3.
		 *
		 * 2) VMA2 still exists.
		 *
		 *    find_vma() will return VMA2, process VMA2->next.
		 *
		 * 3) no more vma in this mm.
		 *
		 *    Process the next task.
		 *
		 * 4) find_vma() returns a different vma, VMA2'.
		 *
		 *    4.1) If VMA2 covers the same range as VMA2', skip VMA2',
		 *         because we already covered the range;
		 *    4.2) VMA2 and VMA2' cover different ranges, process
		 *         VMA2'.
		 */
		if (mmap_lock_is_contended(curr_mm)) {
			info->prev_vm_start = curr_vma->vm_start;
			info->prev_vm_end = curr_vma->vm_end;
			op = task_vma_iter_find_vma;
			mmap_read_unlock(curr_mm);
			if (mmap_read_lock_killable(curr_mm)) {
				mmput(curr_mm);
				goto finish;
			}
		} else {
			op = task_vma_iter_next_vma;
		}
	} else {
again:
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->tid++;
			goto finish;
		}

		if (saved_tid != info->tid) {
			/* new task, process the first vma */
			op = task_vma_iter_first_vma;
		} else {
			/* Found the same tid, which means user space
			 * finished the data in the previous buffer and
			 * read more. We dropped mmap_lock before
			 * returning to user space, so it is necessary
			 * to use find_vma() to find the next vma to
			 * process.
			 */
			op = task_vma_iter_find_vma;
		}

		curr_mm = get_task_mm(curr_task);
		if (!curr_mm)
			goto next_task;

		if (mmap_read_lock_killable(curr_mm)) {
			mmput(curr_mm);
			goto finish;
		}
	}

	switch (op) {
	case task_vma_iter_first_vma:
		curr_vma = find_vma(curr_mm, 0);
		break;
	case task_vma_iter_next_vma:
		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	case task_vma_iter_find_vma:
		/* We dropped mmap_lock so it is necessary to use find_vma()
		 * to find the next vma. This is similar to the mechanism
		 * in show_smaps_rollup().
		 */
		curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
		/* case 1) and 4.2) above just use curr_vma */

		/* check for case 2) or case 4.1) above */
		if (curr_vma &&
		    curr_vma->vm_start == info->prev_vm_start &&
		    curr_vma->vm_end == info->prev_vm_end)
			curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	}
	if (!curr_vma) {
		/* case 3) above, or case 2) 4.1) with vma->next == NULL */
		mmap_read_unlock(curr_mm);
		mmput(curr_mm);
		goto next_task;
	}
	info->task = curr_task;
	info->vma = curr_vma;
	info->mm = curr_mm;
	return curr_vma;

next_task:
	if (info->common.type == BPF_TASK_ITER_TID)
		goto finish;

	put_task_struct(curr_task);
	info->task = NULL;
	info->mm = NULL;
	info->tid++;
	goto again;

finish:
	if (curr_task)
		put_task_struct(curr_task);
	info->task = NULL;
	info->mm = NULL;
	info->vma = NULL;
	return NULL;
}

static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct vm_area_struct *vma;

	vma = task_vma_seq_get_next(info);
	if (vma && *pos == 0)
		++*pos;

	return vma;
}

static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	++*pos;
	return task_vma_seq_get_next(info);
}

struct bpf_iter__task_vma {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	__bpf_md_ptr(struct vm_area_struct *, vma);
};

DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)

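/* A minimal BPF-side sketch for the task_vma iterator (illustrative; the
 * ctx layout mirrors struct bpf_iter__task_vma above):
 *
 *	SEC("iter/task_vma")
 *	int dump_task_vma(struct bpf_iter__task_vma *ctx)
 *	{
 *		struct vm_area_struct *vma = ctx->vma;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task && vma)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%8d %lx-%lx\n",
 *				       task->pid, vma->vm_start,
 *				       vma->vm_end);
 *		return 0;
 *	}
 */
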
static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct bpf_iter__task_vma ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.vma = info->vma;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_vma_seq_show(struct seq_file *seq, void *v)
{
	return __task_vma_seq_show(seq, false);
}

static void task_vma_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	if (!v) {
		(void)__task_vma_seq_show(seq, true);
	} else {
		/* info->vma has not been seen by the BPF program. If the
		 * user space reads more, task_vma_seq_get_next should
		 * return this vma again. Set prev_vm_start to ~0UL,
		 * so that we don't skip the vma returned by the next
		 * find_vma() (case task_vma_iter_find_vma in
		 * task_vma_seq_get_next()).
		 */
		info->prev_vm_start = ~0UL;
		info->prev_vm_end = info->vma->vm_end;
		mmap_read_unlock(info->mm);
		mmput(info->mm);
		info->mm = NULL;
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static const struct seq_operations task_vma_seq_ops = {
	.start	= task_vma_seq_start,
	.next	= task_vma_seq_next,
	.stop	= task_vma_seq_stop,
	.show	= task_vma_seq_show,
};

static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops		= &task_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_info),
};

static int bpf_iter_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info)
{
	switch (aux->task.type) {
	case BPF_TASK_ITER_TID:
		info->iter.task.tid = aux->task.pid;
		break;
	case BPF_TASK_ITER_TGID:
		info->iter.task.pid = aux->task.pid;
		break;
	default:
		break;
	}
	return 0;
}

static void bpf_iter_task_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq)
{
	seq_printf(seq, "task_type:\t%s\n", iter_task_type_names[aux->task.type]);
	if (aux->task.type == BPF_TASK_ITER_TID)
		seq_printf(seq, "tid:\t%u\n", aux->task.pid);
	else if (aux->task.type == BPF_TASK_ITER_TGID)
		seq_printf(seq, "pid:\t%u\n", aux->task.pid);
}

static struct bpf_iter_reg task_reg_info = {
	.target			= "task",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops		= &task_file_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target			= "task_file",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_file_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_vma_seq_info = {
	.seq_ops		= &task_vma_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_vma_info),
};

static struct bpf_iter_reg task_vma_reg_info = {
	.target			= "task_vma",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task_vma, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_vma, vma),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_vma_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{
	struct mmap_unlock_irq_work *work = NULL;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct mm_struct *mm;
	int ret = -ENOENT;

	if (flags)
		return -EINVAL;

	if (!task)
		return -ENOENT;

	mm = task->mm;
	if (!mm)
		return -ENOENT;

	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);

	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;

	vma = find_vma(mm, start);

	if (vma && vma->vm_start <= start && vma->vm_end > start) {
		callback_fn((u64)(long)task, (u64)(long)vma,
			    (u64)(long)callback_ctx, 0, 0);
		ret = 0;
	}
	bpf_mmap_unlock_mm(work, mm);
	return ret;
}

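/* A sketch of calling this helper from a BPF program (illustrative; the
 * attach point and looked-up address are arbitrary, and flags must be 0):
 *
 *	static long check_vma(struct task_struct *task,
 *			      struct vm_area_struct *vma, void *data)
 *	{
 *		// inspect vma->vm_start, vma->vm_end, vma->vm_flags, ...
 *		return 0;
 *	}
 *
 *	SEC("fentry/do_nanosleep")
 *	int prog(void *ctx)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		__u64 addr = 0x7f0000000000;	// arbitrary address
 *
 *		bpf_find_vma(task, addr, check_vma, NULL, 0);
 *		return 0;
 *	}
 */
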
const struct bpf_func_proto bpf_find_vma_proto = {
	.func		= bpf_find_vma,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_FUNC,
	.arg4_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg5_type	= ARG_ANYTHING,
};

DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

static void do_mmap_read_unlock(struct irq_work *entry)
{
	struct mmap_unlock_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}

static int __init task_iter_init(void)
{
	struct mmap_unlock_irq_work *work;
	int ret, cpu;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&mmap_unlock_work, cpu);
		init_irq_work(&work->irq_work, do_mmap_read_unlock);
	}

	task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
	ret = bpf_iter_reg_target(&task_file_reg_info);
	if (ret)
		return ret;

	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
	return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);