/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
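/*
 * The cpumask attributes carry a cpulist-format string, e.g. "0-3,7"
 * for CPUs 0 through 3 plus CPU 7; parse() below feeds it to
 * cpulist_parse().
 */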
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;
static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};
static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
/*
 * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
 * Make sure they are always aligned.
 */
static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions { REGISTER, DEREGISTER, CPU_DONT_CARE };
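/*
 * Design note: listeners are tracked per CPU, so an exiting task's
 * record is sent only to listeners registered for the CPU it exits on
 * (see send_cpu_listeners() and add_del_listener() below). The
 * rw_semaphore keeps the frequent send path on the read side;
 * registration and cleanup take the write side.
 */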
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			 size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
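/*
 * Two cases in prepare_reply() above: exit-time records are
 * unsolicited, so they take a fresh sequence number from the per-cpu
 * counter; replies to a TASKSTATS_CMD_GET request are matched to the
 * requester through genlmsg_put_reply().
 */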
/*
 * Send taskstats data in @skb back to the listener identified in @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
			       struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);
	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
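/*
 * Note the two-phase cleanup above: the send loop only marks dead
 * listeners (s->valid = 0) while holding the semaphore for reading;
 * stale entries are then unlinked under the write lock, so concurrent
 * senders never observe a half-removed entry.
 */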
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);
	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystems can call their functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	rc = 0;
	unlock_task_sighand(first, &flags);
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;
	if (current_user_ns() != &init_user_ns)
		return -EINVAL;
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					 GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len, ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
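/*
 * Worked example of the layout mk_reply() builds (offsets from the
 * start of the generic netlink payload): genlmsghdr (4 bytes) + AGGR
 * nest header (4) + PID/TGID attribute header (4) + u32 pid (4) +
 * STATS attribute header (4) places the taskstats payload 20 bytes in,
 * i.e. on a 4-byte but not an 8-byte boundary, which is exactly the
 * legacy layout the comment above describes.
 */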
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));
	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
			 sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);
err:
	fdput(f);
	return rc;
}
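/*
 * Usage sketch (illustrative, path is an example only): a client
 * open()s a cgroup directory such as /sys/fs/cgroup/cpu/mygroup,
 * passes the descriptor in a CGROUPSTATS_CMD_ATTR_FD attribute of a
 * CGROUPSTATS_CMD_GET request, and gets back a struct cgroupstats
 * holding the per-state task counts built by cgroupstats_build().
 */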
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}
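/*
 * The three terms above match the attributes mk_reply() emits: the u32
 * pid/tgid, the taskstats payload, and the zero-payload nest header;
 * TASKSTATS_NEEDS_PADDING accounts for one more empty attribute.
 */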
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();
	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();
	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}
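/*
 * Illustrative userspace flow (a sketch, not part of this file): a
 * client first resolves the numeric family id for TASKSTATS_GENL_NAME
 * via the genetlink CTRL_CMD_GETFAMILY command, then sends
 * TASKSTATS_CMD_GET carrying exactly one of the four attributes
 * dispatched above, e.g. TASKSTATS_CMD_ATTR_PID holding the pid of
 * interest, and receives a TASKSTATS_CMD_NEW reply laid out by
 * mk_reply(). The getdelays.c example client shipped with the kernel
 * sources walks through the complete exchange.
 */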
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats_new, *stats;

	/* Pairs with smp_store_release() below. */
	stats = smp_load_acquire(&sig->stats);
	if (stats || thread_group_empty(tsk))
		return stats;

	/* No problem if kmem_cache_zalloc() fails */
	stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	stats = sig->stats;
	if (!stats) {
		/*
		 * Pairs with smp_load_acquire() above and orders the
		 * kmem_cache_zalloc().
		 */
		smp_store_release(&sig->stats, stats_new);
		stats = stats_new;
		stats_new = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats_new)
		kmem_cache_free(taskstats_cache, stats_new);

	return stats;
}
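/*
 * Design note: taskstats_tgid_alloc() is a double-checked allocation.
 * The lock-free smp_load_acquire() fast path covers the common case
 * where sig->stats is already set; only the first thread to get here
 * takes siglock and publishes the zeroed structure via
 * smp_store_release(), and the loser of a race simply frees its copy.
 */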
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};
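/*
 * Only TASKSTATS_CMD_GET carries GENL_ADMIN_PERM, so querying task and
 * tgid stats (and registering cpumasks) requires CAP_NET_ADMIN, while
 * CGROUPSTATS_CMD_GET is left available to unprivileged callers.
 */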
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);