// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh, IBM Corp. 2006
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
#include <linux/sched/cputime.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
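
/*
 * The cpumask attributes carry a cpu list string as accepted by
 * cpulist_parse() below, e.g. "0-3,8".
 */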
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family;
static const struct nla_policy taskstats_cmd_get_policy[] = {
        [TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
static const struct nla_policy cgroupstats_cmd_get_policy[] = {
        [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};
struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
        REGISTER,
        DEREGISTER,
        CPU_DONT_CARE
};
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                         size_t size)
{
        struct sk_buff *skb;
        void *reply;

        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = genlmsg_new(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!info) {
                int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
        } else
                reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
        if (reply == NULL) {
                nlmsg_free(skb);
                return -EINVAL;
        }

        *skbp = skb;
        return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);

        genlmsg_end(skb, reply);

        return genlmsg_reply(skb, info);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
                               struct listener_list *listeners)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        genlmsg_end(skb, reply);

        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                        if (!skb_next)
                                break;
                }
                rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
                        s->valid = 0;
                        delcount++;
                }
                skb_cur = skb_next;
        }
        up_read(&listeners->sem);

        if (skb_cur)
                nlmsg_free(skb_cur);

        if (!delcount)
                return;

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                if (!s->valid) {
                        list_del(&s->list);
                        kfree(s);
                }
        }
        up_write(&listeners->sem);
}
static void fill_stats(struct user_namespace *user_ns,
                       struct pid_namespace *pid_ns,
                       struct task_struct *tsk, struct taskstats *stats)
{
        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */

        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
        bacct_add_tsk(user_ns, pid_ns, stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
}
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
        struct task_struct *tsk;

        tsk = find_get_task_by_vpid(pid);
        if (!tsk)
                return -ESRCH;
        fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
        put_task_struct(tsk);
        return 0;
}
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
        struct task_struct *tsk, *first;
        unsigned long flags;
        int rc = -ESRCH;
        u64 delta, utime, stime;
        u64 start_time;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
        first = find_task_by_vpid(tgid);

        if (!first || !lock_task_sighand(first, &flags))
                goto out;

        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
        else
                memset(stats, 0, sizeof(*stats));

        tsk = first;
        start_time = ktime_get_ns();
        do {
                if (tsk->exit_state)
                        continue;
                /*
                 * Accounting subsystem can call its functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

                /* calculate task elapsed time in nsec */
                delta = start_time - tsk->start_time;
                /* Convert to microseconds */
                do_div(delta, NSEC_PER_USEC);
                stats->ac_etime += delta;

                task_cputime(tsk, &utime, &stime);
                stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
                stats->ac_stime += div_u64(stime, NSEC_PER_USEC);

                stats->nvcsw += tsk->nvcsw;
                stats->nivcsw += tsk->nivcsw;
        } while_each_thread(first, tsk);

        unlock_task_sighand(first, &flags);
        rc = 0;
out:
        rcu_read_unlock();

        stats->version = TASKSTATS_VERSION;
        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
        return rc;
}
static void fill_tgid_exit(struct task_struct *tsk)
{
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        if (!tsk->signal->stats)
                goto ret;

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk, into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
        struct listener_list *listeners;
        struct listener *s, *tmp, *s2;
        unsigned int cpu;
        int ret = 0;

        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;

        if (current_user_ns() != &init_user_ns)
                return -EINVAL;

        if (task_active_pid_ns(current) != &init_pid_ns)
                return -EINVAL;

        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener),
                                         GFP_KERNEL, cpu_to_node(cpu));
                        if (!s) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }
                        s->pid = pid;
                        s->valid = 1;

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_for_each_entry(s2, &listeners->list, list) {
                                if (s2->pid == pid && s2->valid)
                                        goto exists;
                        }
                        list_add(&s->list, &listeners->list);
                        s = NULL;
exists:
                        up_write(&listeners->sem);
                        kfree(s); /* nop if NULL */
                }
                return 0;
        }

        /* Deregister or cleanup */
cleanup:
        for_each_cpu(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                        if (s->pid == pid) {
                                list_del(&s->list);
                                kfree(s);
                                break;
                        }
                }
                up_write(&listeners->sem);
        }
        return ret;
}
static int parse(struct nlattr *na, struct cpumask *mask)
{
        char *data;
        int len;
        int ret;

        if (na == NULL)
                return 1;
        len = nla_len(na);
        if (len > TASKSTATS_CPUMASK_MAXLEN)
                return -E2BIG;
        if (len < 1)
                return -EINVAL;
        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        nla_strscpy(data, na, len);
        ret = cpulist_parse(data, mask);
        kfree(data);
        return ret;
}
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
        struct nlattr *na, *ret;
        int aggr;

        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        na = nla_nest_start_noflag(skb, aggr);
        if (!na)
                goto err;

        if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
                                sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
        if (!ret) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        nla_nest_end(skb, na);

        /* caller fills in the reserved, 64-bit aligned taskstats area */
        return nla_data(ret);
err:
        return NULL;
}
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        int rc = 0;
        struct sk_buff *rep_skb;
        struct cgroupstats *stats;
        struct nlattr *na;
        size_t size;
        u32 fd;
        struct fd f;

        na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
        if (!na)
                return -EINVAL;

        fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
        f = fdget(fd);
        if (!f.file)
                return 0;

        size = nla_total_size(sizeof(struct cgroupstats));

        rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
                           size);
        if (rc < 0)
                goto err;

        na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
                         sizeof(struct cgroupstats));
        if (na == NULL) {
                nlmsg_free(rep_skb);
                rc = -EMSGSIZE;
                goto err;
        }

        stats = nla_data(na);
        memset(stats, 0, sizeof(*stats));

        rc = cgroupstats_build(stats, f.file->f_path.dentry);
        if (rc < 0) {
                nlmsg_free(rep_skb);
                goto err;
        }

        rc = send_reply(rep_skb, info);

err:
        fdput(f);
        return rc;
}
static int cmd_attr_register_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}
static size_t taskstats_packet_size(void)
{
        size_t size;

        size = nla_total_size(sizeof(u32)) +
                nla_total_size_64bit(sizeof(struct taskstats)) +
                nla_total_size(0);

        return size;
}
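
/*
 * The three terms above mirror the reply layout built by mk_reply(): a u32
 * pid/tgid attribute, a 64-bit aligned struct taskstats payload, and the
 * enclosing TASKSTATS_TYPE_AGGR_PID/AGGR_TGID nest header (nla_total_size(0)).
 */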
static int cmd_attr_pid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 pid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
        if (!stats)
                goto err;

        rc = fill_stats_for_pid(pid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}
static int cmd_attr_tgid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 tgid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
        if (!stats)
                goto err;

        rc = fill_stats_for_tgid(tgid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
                return cmd_attr_register_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
                return cmd_attr_deregister_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
                return cmd_attr_pid(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
                return cmd_attr_tgid(info);
        else
                return -EINVAL;
}
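
/*
 * For orientation, a minimal userspace sketch of driving TASKSTATS_CMD_GET
 * with libnl-genl-3 (illustrative only; error handling omitted, and the
 * caller needs CAP_NET_ADMIN because of GENL_ADMIN_PERM below):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
 *	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, pid);
 *	nl_send_auto(sk, msg);
 *	nl_recvmsgs_default(sk);	// reply carries TASKSTATS_TYPE_AGGR_PID
 *
 * The kernel's getdelays example tool exercises the same interface over raw
 * netlink sockets.
 */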
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct taskstats *stats_new, *stats;

        /* Pairs with smp_store_release() below. */
        stats = smp_load_acquire(&sig->stats);
        if (stats || thread_group_empty(tsk))
                return stats;

        /* No problem if kmem_cache_zalloc() fails */
        stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

        spin_lock_irq(&tsk->sighand->siglock);
        stats = sig->stats;
        if (!stats) {
                /*
                 * Pairs with the smp_load_acquire() above and orders the
                 * kmem_cache_zalloc().
                 */
                smp_store_release(&sig->stats, stats_new);
                stats = stats_new;
                stats_new = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (stats_new)
                kmem_cache_free(taskstats_cache, stats_new);

        return stats;
}
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
        int rc;
        struct listener_list *listeners;
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        int is_thread_group;

        if (!family_registered)
                return;

        /*
         * Size includes space for nested attributes
         */
        size = taskstats_packet_size();

        is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                size = 2 * size;
                /* fill the tsk->signal->stats structure */
                fill_tgid_exit(tsk);
        }

        listeners = raw_cpu_ptr(&listener_array);
        if (list_empty(&listeners->list))
                return;

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
                         task_pid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        if (!is_thread_group || !group_dead)
                goto send;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
                         task_tgid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
        send_cpu_listeners(rep_skb, listeners);
        return;
err:
        nlmsg_free(rep_skb);
}
static const struct genl_ops taskstats_ops[] = {
        {
                .cmd = TASKSTATS_CMD_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = taskstats_user_cmd,
                .policy = taskstats_cmd_get_policy,
                .maxattr = ARRAY_SIZE(taskstats_cmd_get_policy) - 1,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = CGROUPSTATS_CMD_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = cgroupstats_user_cmd,
                .policy = cgroupstats_cmd_get_policy,
                .maxattr = ARRAY_SIZE(cgroupstats_cmd_get_policy) - 1,
        },
};
static struct genl_family family __ro_after_init = {
        .name = TASKSTATS_GENL_NAME,
        .version = TASKSTATS_GENL_VERSION,
        .module = THIS_MODULE,
        .ops = taskstats_ops,
        .n_ops = ARRAY_SIZE(taskstats_ops),
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
        unsigned int i;

        taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
        }
}
static int __init taskstats_init(void)
{
        int rc;

        rc = genl_register_family(&family);
        if (rc)
                return rc;

        family_registered = 1;
        pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
        return 0;
}
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);