/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
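
/*
 * (Sizing rationale, inferred: roughly 6 bytes per CPU covers a
 * five-digit CPU number plus a separator in a comma-separated cpulist,
 * and the fixed 100 bytes leave slack for range syntax.)
 */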
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;
static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};
static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};
struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
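
/*
 * Listeners register interest per CPU: an exiting task's stats are
 * fanned out only to the listener_list of the CPU on which
 * taskstats_exit() runs (see send_cpu_listeners() below).
 */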
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			 size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
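
/*
 * Note: prepare_reply() serves two paths. For unsolicited exit records
 * (info == NULL) it stamps the message with a per-cpu sequence number;
 * for replies to userspace queries it mirrors the request via
 * genlmsg_put_reply().
 */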
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
			       struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
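
/*
 * Fan-out scheme: genlmsg_unicast() consumes the skb it is handed, so
 * every listener except the last receives a clone and the original is
 * sent last. Listeners whose sockets have gone away (-ECONNREFUSED)
 * are only marked invalid under the read lock; the actual unlinking
 * happens in a second pass under the write lock.
 */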
static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(tsk, stats);
	put_task_struct(tsk);
	return 0;
}
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
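
/*
 * Per-tgid semantics: the reply starts from signal->stats (the sum over
 * threads that have already exited, accumulated in fill_tgid_exit())
 * and then adds the deltas of still-live threads, skipping exiting
 * tasks that were already folded in.
 */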
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
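
/*
 * The attribute payload is an ASCII cpulist as accepted by
 * cpulist_parse(), e.g. "0-3,7" to listen on CPUs 0, 1, 2, 3 and 7.
 */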
#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
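
/*
 * Resulting payload in the unpadded case: genlmsghdr (4 bytes) + AGGR
 * nest header (4) + PID/TGID attribute header (4) + u32 id (4) + STATS
 * attribute header (4) places struct taskstats at the 20-byte offset
 * the comment above refers to.
 */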
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
			   size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
			 sizeof(struct cgroupstats));
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fput_light(file, fput_needed);
	return rc;
}
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}
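
/*
 * This covers one aggregate (nest header + u32 id + stats);
 * taskstats_exit() doubles it when both a PID and a TGID record must
 * fit in the same packet.
 */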
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}
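
/*
 * Userspace sketch of a TASKSTATS_CMD_GET query (illustrative and
 * abridged; see Documentation/accounting/getdelays.c for a complete
 * client). Resolve the family id through the generic netlink
 * controller (CTRL_CMD_GETFAMILY on the "TASKSTATS" name), then:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct genlmsghdr g;
 *		char buf[256];
 *	} req;
 *
 *	req.n.nlmsg_type = family_id;
 *	req.g.cmd = TASKSTATS_CMD_GET;
 *
 * append a TASKSTATS_CMD_ATTR_PID attribute carrying the u32 pid, send
 * req on a NETLINK_GENERIC socket, and fetch TASKSTATS_TYPE_STATS from
 * the TASKSTATS_TYPE_AGGR_PID nest in the reply.
 */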
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}
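
/*
 * Optimistic allocation pattern: the zeroed stats block is allocated
 * without the siglock held; the first thread to take the lock installs
 * its block, and a loser frees the now-redundant allocation.
 */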
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = &__raw_get_cpu_var(listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	fill_stats(tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
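
/*
 * Exit-time message composition: a per-pid record is always appended;
 * the per-tgid record is appended only by the last thread of a group
 * to exit (group_dead), so listeners see exactly one TGID aggregate
 * per thread group.
 */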
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);