/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

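/*
 * Sequence number for kernel-initiated exit messages, kept per cpu so
 * no locking is needed, and the slab cache backing the per-thread-group
 * taskstats accumulators hung off signal_struct.
 */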
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

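/*
 * CGROUPSTATS_CMD_GET takes a single attribute: an open file
 * descriptor referring to a cgroup directory.
 */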
static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

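/*
 * One entry per registered listener on a given cpu: the netlink port it
 * can be unicast on, and a flag cleared when a send fails with
 * -ECONNREFUSED so the stale entry can be reaped later.
 */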
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

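/*
 * Allocate a genetlink message and start its payload.  With a genl_info
 * this is a reply to a userspace request; without one it is a
 * kernel-initiated message (task exit) using the per-cpu sequence
 * counter.
 */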
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		/* Kernel-initiated message: draw a per-cpu sequence number */
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb as a reply to the sender described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		/*
		 * Every listener but the last gets a clone; the final
		 * unicast consumes the original skb.
		 */
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			/* Listener has gone away; mark it for removal */
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

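/*
 * Fill *stats for a single task.  The caller passes either a held
 * task_struct or a pid to look up; in both cases a reference is taken
 * for the duration of the fill and dropped before returning.
 */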
static int fill_pid(pid_t pid, struct task_struct *tsk,
		struct taskstats *stats)
{
	int rc = 0;

	if (!tsk) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return -ESRCH;
	} else
		get_task_struct(tsk);

	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}

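/*
 * Fill *stats with aggregate statistics for a whole thread group: start
 * from the accumulated stats of already-dead threads (if any) and add
 * in the delay accounting and context-switch counts of the live
 * threads, all under the group's sighand lock.
 */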
static int fill_tgid(pid_t tgid, struct task_struct *first,
		struct taskstats *stats)
{
	struct task_struct *tsk;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	if (!first)
		first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

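/*
 * Fold an exiting task's statistics into the per-tgid accumulator so
 * they are not lost when the task_struct goes away.
 */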
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

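/*
 * Register or deregister a listener (identified by its netlink port
 * @pid) on every cpu in @mask.  REGISTER allocates a struct listener
 * near each cpu; DEREGISTER, and the REGISTER failure path, walk the
 * lists and remove the matching entries.
 */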
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

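/*
 * Extract a cpumask from the REGISTER/DEREGISTER_CPUMASK attribute.
 * The attribute carries a cpulist-format string, e.g. "0-3,5".
 * Returns 1 if the attribute is absent, negative on error, 0 on
 * success.
 */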
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

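/*
 * Reserve the reply layout: a nested AGGR_PID/AGGR_TGID attribute
 * containing the id plus an empty taskstats struct, which the caller
 * then fills in place through the returned pointer.
 */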
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;
	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

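/*
 * Handle CGROUPSTATS_CMD_GET: resolve the fd attribute to a file,
 * build cgroupstats for the cgroup behind its dentry, and send the
 * result back to the requester.
 */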
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		/* Reservation can fail; don't dereference a NULL attribute */
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);
err:
	fput_light(file, fput_needed);
	return rc;
}

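/*
 * Handle TASKSTATS_CMD_GET.  The command doubles as the listener
 * (de)registration interface: if a cpumask attribute is present the
 * sender is added to or removed from the per-cpu listener lists,
 * otherwise the PID/TGID attribute selects which stats to return.
 */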
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc;
	struct sk_buff *rep_skb;
	struct taskstats *stats;
	size_t size;
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto free_return_rc;
	if (rc == 0) {
		rc = add_del_listener(info->snd_pid, mask, REGISTER);
		goto free_return_rc;
	}

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto free_return_rc;
	if (rc == 0) {
		rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
free_return_rc:
		free_cpumask_var(mask);
		return rc;
	}
	free_cpumask_var(mask);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
		if (!stats)
			goto err;

		rc = fill_pid(pid, NULL, stats);
		if (rc < 0)
			goto err;
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		if (!stats)
			goto err;

		rc = fill_tgid(tgid, NULL, stats);
		if (rc < 0)
			goto err;
	} else
		goto err;

	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

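/*
 * Lazily allocate the per-thread-group accumulator.  Racing allocators
 * are resolved under siglock: the loser frees its copy.
 */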
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = &__raw_get_cpu_var(listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	rc = fill_pid(-1, tsk, stats);
	if (rc < 0)
		goto err;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	printk(KERN_INFO "registered taskstats version %d\n",
	       TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);
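
/*
 * Userspace talks to this interface over generic netlink: resolve the
 * family id for TASKSTATS_GENL_NAME, then either send TASKSTATS_CMD_GET
 * with a PID/TGID attribute for a one-shot query, or register a cpumask
 * to receive TASKSTATS_CMD_NEW messages as tasks exit.  See
 * Documentation/accounting/getdelays.c for a worked example client.
 */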