OSDN Git Service

4c12cb2cd7047d29b8577ab0e6deeabec353d226
[uclinux-h8/linux.git] / fs / proc / base.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/proc/base.c
4  *
5  *  Copyright (C) 1991, 1992 Linus Torvalds
6  *
7  *  proc base directory handling functions
8  *
9  *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
10  *  Instead of using magical inumbers to determine the kind of object
11  *  we allocate and fill in-core inodes upon lookup. They don't even
12  *  go into icache. We cache the reference to task_struct upon lookup too.
13  *  Eventually it should become a filesystem in its own. We don't use the
14  *  rest of procfs anymore.
15  *
16  *
17  *  Changelog:
18  *  17-Jan-2005
19  *  Allan Bezerra
20  *  Bruna Moreira <bruna.moreira@indt.org.br>
21  *  Edjard Mota <edjard.mota@indt.org.br>
22  *  Ilias Biris <ilias.biris@indt.org.br>
23  *  Mauricio Lin <mauricio.lin@indt.org.br>
24  *
25  *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
26  *
27  *  A new process specific entry (smaps) included in /proc. It shows the
28  *  size of rss for each memory area. The maps entry lacks information
29  *  about physical memory size (rss) for each mapped file, i.e.,
30  *  rss information for executables and library files.
31  *  This additional information is useful for any tools that need to know
32  *  about physical memory consumption for a process specific library.
33  *
34  *  Changelog:
35  *  21-Feb-2005
36  *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
37  *  Pud inclusion in the page table walking.
38  *
39  *  ChangeLog:
40  *  10-Mar-2005
41  *  10LE Instituto Nokia de Tecnologia - INdT:
42  *  A better way to walks through the page table as suggested by Hugh Dickins.
43  *
44  *  Simo Piiroinen <simo.piiroinen@nokia.com>:
45  *  Smaps information related to shared, private, clean and dirty pages.
46  *
47  *  Paul Mundt <paul.mundt@nokia.com>:
48  *  Overall revision about smaps.
49  */
50
51 #include <linux/uaccess.h>
52
53 #include <linux/errno.h>
54 #include <linux/time.h>
55 #include <linux/proc_fs.h>
56 #include <linux/stat.h>
57 #include <linux/task_io_accounting_ops.h>
58 #include <linux/init.h>
59 #include <linux/capability.h>
60 #include <linux/file.h>
61 #include <linux/fdtable.h>
62 #include <linux/string.h>
63 #include <linux/seq_file.h>
64 #include <linux/namei.h>
65 #include <linux/mnt_namespace.h>
66 #include <linux/mm.h>
67 #include <linux/swap.h>
68 #include <linux/rcupdate.h>
69 #include <linux/kallsyms.h>
70 #include <linux/stacktrace.h>
71 #include <linux/resource.h>
72 #include <linux/module.h>
73 #include <linux/mount.h>
74 #include <linux/security.h>
75 #include <linux/ptrace.h>
76 #include <linux/tracehook.h>
77 #include <linux/printk.h>
78 #include <linux/cgroup.h>
79 #include <linux/cpuset.h>
80 #include <linux/audit.h>
81 #include <linux/poll.h>
82 #include <linux/nsproxy.h>
83 #include <linux/oom.h>
84 #include <linux/elf.h>
85 #include <linux/pid_namespace.h>
86 #include <linux/user_namespace.h>
87 #include <linux/fs_struct.h>
88 #include <linux/slab.h>
89 #include <linux/sched/autogroup.h>
90 #include <linux/sched/mm.h>
91 #include <linux/sched/coredump.h>
92 #include <linux/sched/debug.h>
93 #include <linux/sched/stat.h>
94 #include <linux/flex_array.h>
95 #include <linux/posix-timers.h>
96 #ifdef CONFIG_HARDWALL
97 #include <asm/hardwall.h>
98 #endif
99 #include <trace/events/oom.h>
100 #include "internal.h"
101 #include "fd.h"
102
103 #include "../../lib/kstrtox.h"
104
105 /* NOTE:
106  *      Implementing inode permission operations in /proc is almost
107  *      certainly an error.  Permission checks need to happen during
108  *      each system call not at open time.  The reason is that most of
109  *      what we wish to check for permissions in /proc varies at runtime.
110  *
111  *      The classic example of a problem is opening file descriptors
112  *      in /proc for a task before it execs a suid executable.
113  */
114
/* Link counts for /proc/<tid> and /proc/<tgid> directories, computed once
 * at init time by pid_entry_nlink() from the entry tables below. */
static u8 nlink_tid;
static u8 nlink_tgid;

/*
 * One entry of a /proc/<pid> (or /proc/<pid>/task/<tid>) directory table:
 * name/len identify the dirent, mode carries the file type and permission
 * bits, and iop/fop/op supply the inode operations, file operations and
 * per-entry payload (show callback or link resolver).
 */
struct pid_entry {
        const char *name;
        unsigned int len;       /* strlen(name); precomputed by NOD() */
        umode_t mode;
        const struct inode_operations *iop;
        const struct file_operations *fop;
        union proc_op op;
};

/* Generic pid_entry initializer; NAME must be a string literal so that
 * sizeof(NAME) - 1 yields its length at compile time. */
#define NOD(NAME, MODE, IOP, FOP, OP) {                 \
        .name = (NAME),                                 \
        .len  = sizeof(NAME) - 1,                       \
        .mode = MODE,                                   \
        .iop  = IOP,                                    \
        .fop  = FOP,                                    \
        .op   = OP,                                     \
}

/* Directory entry with explicit inode and file operations. */
#define DIR(NAME, MODE, iops, fops)     \
        NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
/* Symlink whose target is resolved at follow time by get_link(). */
#define LNK(NAME, get_link)                                     \
        NOD(NAME, (S_IFLNK|S_IRWXUGO),                          \
                &proc_pid_link_inode_operations, NULL,          \
                { .proc_get_link = get_link } )
/* Regular file with explicit file operations. */
#define REG(NAME, MODE, fops)                           \
        NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
/* Regular file rendered by a single seq_file show() callback. */
#define ONE(NAME, MODE, show)                           \
        NOD(NAME, (S_IFREG|(MODE)),                     \
                NULL, &proc_single_file_operations,     \
                { .proc_show = show } )
148
149 /*
150  * Count the number of hardlinks for the pid_entry table, excluding the .
151  * and .. links.
152  */
153 static unsigned int __init pid_entry_nlink(const struct pid_entry *entries,
154         unsigned int n)
155 {
156         unsigned int i;
157         unsigned int count;
158
159         count = 2;
160         for (i = 0; i < n; ++i) {
161                 if (S_ISDIR(entries[i].mode))
162                         ++count;
163         }
164
165         return count;
166 }
167
/*
 * Copy @task's root directory into @root (takes a path reference the
 * caller must drop).  Returns 0 on success or -ENOENT if the task has
 * no fs_struct (i.e. it is exiting).  task_lock() stabilizes task->fs.
 */
static int get_task_root(struct task_struct *task, struct path *root)
{
        int result = -ENOENT;

        task_lock(task);
        if (task->fs) {
                get_fs_root(task->fs, root);
                result = 0;
        }
        task_unlock(task);
        return result;
}
180
/*
 * proc_get_link handler for /proc/<pid>/cwd: resolve the task's current
 * working directory into @path (reference taken for the caller).
 * Returns -ENOENT if the task is gone or has no fs_struct.
 */
static int proc_cwd_link(struct dentry *dentry, struct path *path)
{
        struct task_struct *task = get_proc_task(d_inode(dentry));
        int result = -ENOENT;

        if (task) {
                task_lock(task);        /* stabilizes task->fs */
                if (task->fs) {
                        get_fs_pwd(task->fs, path);
                        result = 0;
                }
                task_unlock(task);
                put_task_struct(task);
        }
        return result;
}
197
198 static int proc_root_link(struct dentry *dentry, struct path *path)
199 {
200         struct task_struct *task = get_proc_task(d_inode(dentry));
201         int result = -ENOENT;
202
203         if (task) {
204                 result = get_task_root(task, path);
205                 put_task_struct(task);
206         }
207         return result;
208 }
209
/*
 * Read handler for /proc/<pid>/cmdline.  Copies the task's argv out of
 * its own user address space; when the last ARGV byte is not NUL (the
 * setproctitle() case) the single command-line string is allowed to
 * extend into ENVP.  Inherently racy: the target may rewrite these
 * strings while we read them.
 */
static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
                                     size_t _count, loff_t *pos)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        char *page;
        unsigned long count = _count;
        unsigned long arg_start, arg_end, env_start, env_end;
        unsigned long len1, len2, len;
        unsigned long p;
        char c;
        ssize_t rv;

        BUG_ON(*pos < 0);

        tsk = get_proc_task(file_inode(file));
        if (!tsk)
                return -ESRCH;
        mm = get_task_mm(tsk);
        put_task_struct(tsk);
        if (!mm)
                return 0;
        /* Check if process spawned far enough to have cmdline. */
        if (!mm->env_end) {
                rv = 0;
                goto out_mmput;
        }

        /* Bounce buffer for access_remote_vm() chunks. */
        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page) {
                rv = -ENOMEM;
                goto out_mmput;
        }

        /* Snapshot the argv/envp boundaries under mmap_sem. */
        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        BUG_ON(arg_start > arg_end);
        BUG_ON(env_start > env_end);

        len1 = arg_end - arg_start;
        len2 = env_end - env_start;

        /* Empty ARGV. */
        if (len1 == 0) {
                rv = 0;
                goto out_free_page;
        }
        /*
         * Inherently racy -- command line shares address space
         * with code and data.
         */
        rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
        if (rv <= 0)
                goto out_free_page;

        rv = 0;

        if (c == '\0') {
                /* Command line (set of strings) occupies whole ARGV. */
                if (len1 <= *pos)
                        goto out_free_page;

                p = arg_start + *pos;
                len = len1 - *pos;
                while (count > 0 && len > 0) {
                        unsigned int _count;    /* chunk size; shadows the parameter */
                        int nr_read;

                        _count = min3(count, len, PAGE_SIZE);
                        nr_read = access_remote_vm(mm, p, page, _count, 0);
                        if (nr_read < 0)
                                rv = nr_read;
                        if (nr_read <= 0)
                                goto out_free_page;

                        if (copy_to_user(buf, page, nr_read)) {
                                rv = -EFAULT;
                                goto out_free_page;
                        }

                        p       += nr_read;
                        len     -= nr_read;
                        buf     += nr_read;
                        count   -= nr_read;
                        rv      += nr_read;
                }
        } else {
                /*
                 * Command line (1 string) occupies ARGV and
                 * extends into ENVP.
                 */
                struct {
                        unsigned long p;
                        unsigned long len;
                } cmdline[2] = {
                        { .p = arg_start, .len = len1 },
                        { .p = env_start, .len = len2 },
                };
                loff_t pos1 = *pos;
                unsigned int i;

                /* Skip regions lying entirely before *pos. */
                i = 0;
                while (i < 2 && pos1 >= cmdline[i].len) {
                        pos1 -= cmdline[i].len;
                        i++;
                }
                while (i < 2) {
                        p = cmdline[i].p + pos1;
                        len = cmdline[i].len - pos1;
                        while (count > 0 && len > 0) {
                                unsigned int _count, l;
                                int nr_read;
                                bool final;

                                _count = min3(count, len, PAGE_SIZE);
                                nr_read = access_remote_vm(mm, p, page, _count, 0);
                                if (nr_read < 0)
                                        rv = nr_read;
                                if (nr_read <= 0)
                                        goto out_free_page;

                                /*
                                 * Command line can be shorter than whole ARGV
                                 * even if last "marker" byte says it is not.
                                 */
                                final = false;
                                l = strnlen(page, nr_read);
                                if (l < nr_read) {
                                        /* Embedded NUL: stop at end of string. */
                                        nr_read = l;
                                        final = true;
                                }

                                if (copy_to_user(buf, page, nr_read)) {
                                        rv = -EFAULT;
                                        goto out_free_page;
                                }

                                p       += nr_read;
                                len     -= nr_read;
                                buf     += nr_read;
                                count   -= nr_read;
                                rv      += nr_read;

                                if (final)
                                        goto out_free_page;
                        }

                        /* Only first chunk can be read partially. */
                        pos1 = 0;
                        i++;
                }
        }

out_free_page:
        free_page((unsigned long)page);
out_mmput:
        mmput(mm);
        if (rv > 0)
                *pos += rv;
        return rv;
}
376
/* File operations for /proc/<pid>/cmdline (read-only, seekable). */
static const struct file_operations proc_pid_cmdline_ops = {
        .read   = proc_pid_cmdline_read,
        .llseek = generic_file_llseek,
};
381
#ifdef CONFIG_KALLSYMS
/*
 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 * Prints the symbol the task is blocked in, or '0' when the task is not
 * blocked, the caller may not ptrace it, or symbol lookup fails.
 */
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
                          struct pid *pid, struct task_struct *task)
{
        unsigned long wchan;
        char symname[KSYM_NAME_LEN];

        wchan = get_wchan(task);

        if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
                        && !lookup_symbol_name(wchan, symname))
                /* seq_puts: no format parsing needed for a plain string */
                seq_puts(m, symname);
        else
                seq_putc(m, '0');

        return 0;
}
#endif /* CONFIG_KALLSYMS */
404
/*
 * Take cred_guard_mutex and verify ptrace-attach rights before exposing
 * sensitive task state (stack, syscall).  Holding the mutex prevents a
 * concurrent exec from changing the task's credentials under us.
 * Returns 0 on success, -EINTR if killed while waiting, -EPERM if the
 * caller may not attach.  Pair with unlock_trace().
 */
static int lock_trace(struct task_struct *task)
{
        int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
        if (err)
                return err;
        if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
                mutex_unlock(&task->signal->cred_guard_mutex);
                return -EPERM;
        }
        return 0;
}
416
/* Release the mutex taken by a successful lock_trace(). */
static void unlock_trace(struct task_struct *task)
{
        mutex_unlock(&task->signal->cred_guard_mutex);
}
421
#ifdef CONFIG_STACKTRACE

#define MAX_STACK_TRACE_DEPTH   64

/*
 * Provides /proc/<pid>/stack: one "[<0>] symbol+off/len" line per frame
 * of the task's kernel stack.  Requires ptrace-attach rights (checked
 * via lock_trace()).
 */
static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
                          struct pid *pid, struct task_struct *task)
{
        struct stack_trace trace;
        unsigned long *entries;
        int err;
        unsigned int i;

        /* kmalloc_array() checks the count * size multiplication for
         * overflow, unlike the open-coded kmalloc(n * size) form. */
        entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
                                GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        trace.nr_entries        = 0;
        trace.max_entries       = MAX_STACK_TRACE_DEPTH;
        trace.entries           = entries;
        trace.skip              = 0;

        err = lock_trace(task);
        if (!err) {
                save_stack_trace_tsk(task, &trace);

                /* i is unsigned to match trace.nr_entries */
                for (i = 0; i < trace.nr_entries; i++) {
                        seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
                }
                unlock_trace(task);
        }
        kfree(entries);

        return err;
}
#endif
457
#ifdef CONFIG_SCHED_INFO
/*
 * Provides /proc/PID/schedstat: cumulative runtime, run-queue delay and
 * number of timeslices, or "0 0 0" when schedstats are disabled.
 */
static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
                              struct pid *pid, struct task_struct *task)
{
        if (unlikely(!sched_info_on()))
                /* seq_puts: constant string needs no format parsing */
                seq_puts(m, "0 0 0\n");
        else
                seq_printf(m, "%llu %llu %lu\n",
                   (unsigned long long)task->se.sum_exec_runtime,
                   (unsigned long long)task->sched_info.run_delay,
                   task->sched_info.pcount);

        return 0;
}
#endif
476
#ifdef CONFIG_LATENCYTOP
/*
 * seq_file show handler for /proc/<pid>/latency: dumps the task's
 * latency_record table, one line per record with count/time/max
 * followed by the saved backtrace symbols.
 */
static int lstats_show_proc(struct seq_file *m, void *v)
{
        int i;
        struct inode *inode = m->private;
        struct task_struct *task = get_proc_task(inode);

        if (!task)
                return -ESRCH;
        seq_puts(m, "Latency Top version : v0.1\n");
        /* 32 matches the fixed size of task->latency_record[] */
        for (i = 0; i < 32; i++) {
                struct latency_record *lr = &task->latency_record[i];
                /* An empty backtrace marks an unused slot. */
                if (lr->backtrace[0]) {
                        int q;
                        seq_printf(m, "%i %li %li",
                                   lr->count, lr->time, lr->max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                unsigned long bt = lr->backtrace[q];
                                if (!bt)
                                        break;
                                /* ULONG_MAX terminates a truncated trace */
                                if (bt == ULONG_MAX)
                                        break;
                                seq_printf(m, " %ps", (void *)bt);
                        }
                        seq_putc(m, '\n');
                }

        }
        put_task_struct(task);
        return 0;
}
508
/* open() handler: bind the seq_file to the owning proc inode. */
static int lstats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lstats_show_proc, inode);
}
513
514 static ssize_t lstats_write(struct file *file, const char __user *buf,
515                             size_t count, loff_t *offs)
516 {
517         struct task_struct *task = get_proc_task(file_inode(file));
518
519         if (!task)
520                 return -ESRCH;
521         clear_all_latency_tracing(task);
522         put_task_struct(task);
523
524         return count;
525 }
526
/* File operations for /proc/<pid>/latency (CONFIG_LATENCYTOP). */
static const struct file_operations proc_lstats_operations = {
        .open           = lstats_open,
        .read           = seq_read,
        .write          = lstats_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

#endif
536
537 static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
538                           struct pid *pid, struct task_struct *task)
539 {
540         unsigned long totalpages = totalram_pages + total_swap_pages;
541         unsigned long points = 0;
542
543         points = oom_badness(task, NULL, NULL, totalpages) *
544                                         1000 / totalpages;
545         seq_printf(m, "%lu\n", points);
546
547         return 0;
548 }
549
/* Human-readable label and unit for one rlimit row in /proc/<pid>/limits. */
struct limit_names {
        const char *name;
        const char *unit;       /* NULL when the limit is dimensionless */
};

/* Display strings for each RLIMIT_*, indexed by the rlimit constant. */
static const struct limit_names lnames[RLIM_NLIMITS] = {
        [RLIMIT_CPU] = {"Max cpu time", "seconds"},
        [RLIMIT_FSIZE] = {"Max file size", "bytes"},
        [RLIMIT_DATA] = {"Max data size", "bytes"},
        [RLIMIT_STACK] = {"Max stack size", "bytes"},
        [RLIMIT_CORE] = {"Max core file size", "bytes"},
        [RLIMIT_RSS] = {"Max resident set", "bytes"},
        [RLIMIT_NPROC] = {"Max processes", "processes"},
        [RLIMIT_NOFILE] = {"Max open files", "files"},
        [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
        [RLIMIT_AS] = {"Max address space", "bytes"},
        [RLIMIT_LOCKS] = {"Max file locks", "locks"},
        [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
        [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
        [RLIMIT_NICE] = {"Max nice priority", NULL},
        [RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
        [RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
};
573
574 /* Display limits for a process */
575 static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
576                            struct pid *pid, struct task_struct *task)
577 {
578         unsigned int i;
579         unsigned long flags;
580
581         struct rlimit rlim[RLIM_NLIMITS];
582
583         if (!lock_task_sighand(task, &flags))
584                 return 0;
585         memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
586         unlock_task_sighand(task, &flags);
587
588         /*
589          * print the file header
590          */
591        seq_printf(m, "%-25s %-20s %-20s %-10s\n",
592                   "Limit", "Soft Limit", "Hard Limit", "Units");
593
594         for (i = 0; i < RLIM_NLIMITS; i++) {
595                 if (rlim[i].rlim_cur == RLIM_INFINITY)
596                         seq_printf(m, "%-25s %-20s ",
597                                    lnames[i].name, "unlimited");
598                 else
599                         seq_printf(m, "%-25s %-20lu ",
600                                    lnames[i].name, rlim[i].rlim_cur);
601
602                 if (rlim[i].rlim_max == RLIM_INFINITY)
603                         seq_printf(m, "%-20s ", "unlimited");
604                 else
605                         seq_printf(m, "%-20lu ", rlim[i].rlim_max);
606
607                 if (lnames[i].unit)
608                         seq_printf(m, "%-10s\n", lnames[i].unit);
609                 else
610                         seq_putc(m, '\n');
611         }
612
613         return 0;
614 }
615
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
/*
 * Provides /proc/<pid>/syscall: "running" if the task is on a CPU,
 * "nr sp pc" for a task blocked outside a syscall (nr < 0), or
 * "nr arg0..arg5 sp pc" for a task blocked inside syscall nr.
 * Requires ptrace-attach rights via lock_trace().
 */
static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
{
        long nr;
        unsigned long args[6], sp, pc;
        int res;

        res = lock_trace(task);
        if (res)
                return res;

        if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
                seq_puts(m, "running\n");
        else if (nr < 0)
                /* Blocked, but not in a syscall. */
                seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
        else
                seq_printf(m,
                       "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
                       nr,
                       args[0], args[1], args[2], args[3], args[4], args[5],
                       sp, pc);
        unlock_trace(task);

        return 0;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
643
644 /************************************************************************/
645 /*                       Here the fs part begins                        */
646 /************************************************************************/
647
648 /* permission checks */
649 static int proc_fd_access_allowed(struct inode *inode)
650 {
651         struct task_struct *task;
652         int allowed = 0;
653         /* Allow access to a task's file descriptors if it is us or we
654          * may use ptrace attach to the process and find out that
655          * information.
656          */
657         task = get_proc_task(inode);
658         if (task) {
659                 allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
660                 put_task_struct(task);
661         }
662         return allowed;
663 }
664
/*
 * Shared ->setattr for proc inodes.  Mode changes are refused (proc
 * file permissions are fixed by the entry tables); everything else is
 * validated by setattr_prepare() and applied to the in-core inode only.
 */
int proc_setattr(struct dentry *dentry, struct iattr *attr)
{
        int error;
        struct inode *inode = d_inode(dentry);

        if (attr->ia_valid & ATTR_MODE)
                return -EPERM;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
681
682 /*
683  * May current process learn task's sched/cmdline info (for hide_pid_min=1)
684  * or euid/egid (for hide_pid_min=2)?
685  */
686 static bool has_pid_permissions(struct pid_namespace *pid,
687                                  struct task_struct *task,
688                                  int hide_pid_min)
689 {
690         if (pid->hide_pid < hide_pid_min)
691                 return true;
692         if (in_group_p(pid->pid_gid))
693                 return true;
694         return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
695 }
696
697
/*
 * ->permission for /proc/<pid> directories, enforcing the hidepid=
 * mount option on top of the normal mode bits.
 */
static int proc_pid_permission(struct inode *inode, int mask)
{
        struct pid_namespace *pid = inode->i_sb->s_fs_info;
        struct task_struct *task;
        bool has_perms;

        task = get_proc_task(inode);
        if (!task)
                return -ESRCH;
        has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
        put_task_struct(task);

        if (!has_perms) {
                if (pid->hide_pid == HIDEPID_INVISIBLE) {
                        /*
                         * Let's make getdents(), stat(), and open()
                         * consistent with each other.  If a process
                         * may not stat() a file, it shouldn't be seen
                         * in procfs at all.
                         */
                        return -ENOENT;
                }

                return -EPERM;
        }
        /* Fall through to the ordinary mode-bit check. */
        return generic_permission(inode, mask);
}
725
726
727
/* Default inode operations for proc entries: only setattr is special. */
static const struct inode_operations proc_def_inode_operations = {
        .setattr        = proc_setattr,
};
731
732 static int proc_single_show(struct seq_file *m, void *v)
733 {
734         struct inode *inode = m->private;
735         struct pid_namespace *ns;
736         struct pid *pid;
737         struct task_struct *task;
738         int ret;
739
740         ns = inode->i_sb->s_fs_info;
741         pid = proc_pid(inode);
742         task = get_pid_task(pid, PIDTYPE_PID);
743         if (!task)
744                 return -ESRCH;
745
746         ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
747
748         put_task_struct(task);
749         return ret;
750 }
751
/* open() handler for ONE() entries: bind the seq_file to the inode. */
static int proc_single_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, proc_single_show, inode);
}
756
/* File operations shared by all ONE() seq_file entries. */
static const struct file_operations proc_single_file_operations = {
        .open           = proc_single_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
763
764
/*
 * Resolve the inode's task and get its mm with a ptrace access check
 * (@mode is PTRACE_MODE_READ or PTRACE_MODE_ATTACH).  Returns the mm,
 * NULL (task has no mm), or an ERR_PTR.  On success the mm is held via
 * mm_count only (mmgrab), not mm_users: the struct stays valid but its
 * address space may still be torn down; users must mmget_not_zero()
 * before touching memory, and release with mmdrop().
 */
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
{
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct *mm = ERR_PTR(-ESRCH);

        if (task) {
                mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
                put_task_struct(task);

                if (!IS_ERR_OR_NULL(mm)) {
                        /* ensure this mm_struct can't be freed */
                        mmgrab(mm);
                        /* but do not pin its memory */
                        mmput(mm);
                }
        }

        return mm;
}
784
785 static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
786 {
787         struct mm_struct *mm = proc_mem_open(inode, mode);
788
789         if (IS_ERR(mm))
790                 return PTR_ERR(mm);
791
792         file->private_data = mm;
793         return 0;
794 }
795
/*
 * open() for /proc/<pid>/mem: requires ATTACH-level ptrace rights since
 * it allows reading and writing arbitrary target memory.
 */
static int mem_open(struct inode *inode, struct file *file)
{
        int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);

        /* OK to pass negative loff_t, we can catch out-of-range */
        file->f_mode |= FMODE_UNSIGNED_OFFSET;

        return ret;
}
805
/*
 * Shared read/write engine for /proc/<pid>/mem.  Transfers up to @count
 * bytes between userspace @buf and the target mm at offset *ppos, one
 * bounce page at a time, using access_remote_vm() with FOLL_FORCE.
 * Returns bytes transferred, or -EIO/-EFAULT/-ENOMEM; a partial
 * transfer before an access failure returns the partial count.
 */
static ssize_t mem_rw(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos, int write)
{
        struct mm_struct *mm = file->private_data;
        unsigned long addr = *ppos;
        ssize_t copied;
        char *page;
        unsigned int flags;

        if (!mm)
                return 0;

        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        copied = 0;
        /* open() only grabbed mm_count; pin the address space now. */
        if (!mmget_not_zero(mm))
                goto free;

        flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);

        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);

                if (write && copy_from_user(page, buf, this_len)) {
                        copied = -EFAULT;
                        break;
                }

                this_len = access_remote_vm(mm, addr, page, this_len, flags);
                if (!this_len) {
                        /* Report -EIO only if nothing was transferred. */
                        if (!copied)
                                copied = -EIO;
                        break;
                }

                if (!write && copy_to_user(buf, page, this_len)) {
                        copied = -EFAULT;
                        break;
                }

                buf += this_len;
                addr += this_len;
                copied += this_len;
                count -= this_len;
        }
        /* Advance the file position even on partial/failed transfers. */
        *ppos = addr;

        mmput(mm);
free:
        free_page((unsigned long) page);
        return copied;
}
860
/* read() for /proc/<pid>/mem: thin wrapper over mem_rw(). */
static ssize_t mem_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        return mem_rw(file, buf, count, ppos, 0);
}

/* write() for /proc/<pid>/mem; the cast drops const to share mem_rw(),
 * which never reads from @buf in write mode before copy_from_user(). */
static ssize_t mem_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        return mem_rw(file, (char __user*)buf, count, ppos, 1);
}
872
873 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
874 {
875         switch (orig) {
876         case 0:
877                 file->f_pos = offset;
878                 break;
879         case 1:
880                 file->f_pos += offset;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885         force_successful_syscall_return();
886         return file->f_pos;
887 }
888
889 static int mem_release(struct inode *inode, struct file *file)
890 {
891         struct mm_struct *mm = file->private_data;
892         if (mm)
893                 mmdrop(mm);
894         return 0;
895 }
896
/* File operations for /proc/<pid>/mem. */
static const struct file_operations proc_mem_operations = {
        .llseek         = mem_lseek,
        .read           = mem_read,
        .write          = mem_write,
        .open           = mem_open,
        .release        = mem_release,
};
904
/* open() for /proc/<pid>/environ: READ-level ptrace rights suffice. */
static int environ_open(struct inode *inode, struct file *file)
{
        return __mem_open(inode, file, PTRACE_MODE_READ);
}
909
/*
 * read() for /proc/<pid>/environ: copies the [env_start, env_end)
 * region of the target mm out through a bounce page.  Like cmdline,
 * this races with the target rewriting its own environment.
 */
static ssize_t environ_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        char *page;
        unsigned long src = *ppos;
        int ret = 0;
        struct mm_struct *mm = file->private_data;
        unsigned long env_start, env_end;

        /* Ensure the process spawned far enough to have an environment. */
        if (!mm || !mm->env_end)
                return 0;

        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        ret = 0;
        /* open() only grabbed mm_count; pin the address space now. */
        if (!mmget_not_zero(mm))
                goto free;

        /* Snapshot the environment boundaries under mmap_sem. */
        down_read(&mm->mmap_sem);
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        while (count > 0) {
                size_t this_len, max_len;
                int retval;

                /* EOF once the offset passes the end of the region. */
                if (src >= (env_end - env_start))
                        break;

                this_len = env_end - (env_start + src);

                max_len = min_t(size_t, PAGE_SIZE, count);
                this_len = min(max_len, this_len);

                retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);

                if (retval <= 0) {
                        ret = retval;
                        break;
                }

                if (copy_to_user(buf, page, retval)) {
                        ret = -EFAULT;
                        break;
                }

                ret += retval;
                src += retval;
                buf += retval;
                count -= retval;
        }
        *ppos = src;
        mmput(mm);

free:
        free_page((unsigned long) page);
        return ret;
}
972
/* File operations for /proc/<pid>/environ. */
static const struct file_operations proc_environ_operations = {
        .open           = environ_open,
        .read           = environ_read,
        .llseek         = generic_file_llseek,
        .release        = mem_release,
};
979
980 static int auxv_open(struct inode *inode, struct file *file)
981 {
982         return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
983 }
984
985 static ssize_t auxv_read(struct file *file, char __user *buf,
986                         size_t count, loff_t *ppos)
987 {
988         struct mm_struct *mm = file->private_data;
989         unsigned int nwords = 0;
990
991         if (!mm)
992                 return 0;
993         do {
994                 nwords += 2;
995         } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
996         return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv,
997                                        nwords * sizeof(mm->saved_auxv[0]));
998 }
999
/* File operations for /proc/<pid>/auxv. */
static const struct file_operations proc_auxv_operations = {
        .open           = auxv_open,
        .read           = auxv_read,
        .llseek         = generic_file_llseek,
        .release        = mem_release,
};
1006
1007 static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
1008                             loff_t *ppos)
1009 {
1010         struct task_struct *task = get_proc_task(file_inode(file));
1011         char buffer[PROC_NUMBUF];
1012         int oom_adj = OOM_ADJUST_MIN;
1013         size_t len;
1014
1015         if (!task)
1016                 return -ESRCH;
1017         if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
1018                 oom_adj = OOM_ADJUST_MAX;
1019         else
1020                 oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
1021                           OOM_SCORE_ADJ_MAX;
1022         put_task_struct(task);
1023         len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
1024         return simple_read_from_buffer(buf, count, ppos, buffer, len);
1025 }
1026
/*
 * Common implementation for writes to /proc/<pid>/oom_adj (legacy == true)
 * and /proc/<pid>/oom_score_adj (legacy == false).  @oom_adj has already
 * been converted to the oom_score_adj scale by the caller.
 *
 * Returns 0 on success or a negative errno.
 */
static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
{
        /* Serializes concurrent updates across all tasks sharing an mm. */
        static DEFINE_MUTEX(oom_adj_mutex);
        struct mm_struct *mm = NULL;
        struct task_struct *task;
        int err = 0;

        task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;

        mutex_lock(&oom_adj_mutex);
        if (legacy) {
                /* Lowering the score (raising OOM protection) needs privilege. */
                if (oom_adj < task->signal->oom_score_adj &&
                                !capable(CAP_SYS_RESOURCE)) {
                        err = -EACCES;
                        goto err_unlock;
                }
                /*
                 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
                 * /proc/pid/oom_score_adj instead.
                 */
                pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
                          current->comm, task_pid_nr(current), task_pid_nr(task),
                          task_pid_nr(task));
        } else {
                /* Unprivileged writes may not go below oom_score_adj_min. */
                if ((short)oom_adj < task->signal->oom_score_adj_min &&
                                !capable(CAP_SYS_RESOURCE)) {
                        err = -EACCES;
                        goto err_unlock;
                }
        }

        /*
         * Make sure we will check other processes sharing the mm if this is
         * not vfork which wants its own oom_score_adj.
         * pin the mm so it doesn't go away and get reused after task_unlock
         */
        if (!task->vfork_done) {
                struct task_struct *p = find_lock_task_mm(task);

                if (p) {
                        /* Only CLONE_VM sharers need the propagation below. */
                        if (atomic_read(&p->mm->mm_users) > 1) {
                                mm = p->mm;
                                mmgrab(mm);
                        }
                        task_unlock(p);
                }
        }

        task->signal->oom_score_adj = oom_adj;
        if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
                task->signal->oom_score_adj_min = (short)oom_adj;
        trace_oom_score_adj_update(task);

        if (mm) {
                struct task_struct *p;

                /* Propagate the new value to every process sharing this mm. */
                rcu_read_lock();
                for_each_process(p) {
                        if (same_thread_group(task, p))
                                continue;

                        /* do not touch kernel threads or the global init */
                        if (p->flags & PF_KTHREAD || is_global_init(p))
                                continue;

                        task_lock(p);
                        if (!p->vfork_done && process_shares_mm(p, mm)) {
                                pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
                                                task_pid_nr(p), p->comm,
                                                p->signal->oom_score_adj, oom_adj,
                                                task_pid_nr(task), task->comm);
                                p->signal->oom_score_adj = oom_adj;
                                if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
                                        p->signal->oom_score_adj_min = (short)oom_adj;
                        }
                        task_unlock(p);
                }
                rcu_read_unlock();
                mmdrop(mm);
        }
err_unlock:
        mutex_unlock(&oom_adj_mutex);
        put_task_struct(task);
        return err;
}
1114
1115 /*
1116  * /proc/pid/oom_adj exists solely for backwards compatibility with previous
1117  * kernels.  The effective policy is defined by oom_score_adj, which has a
1118  * different scale: oom_adj grew exponentially and oom_score_adj grows linearly.
1119  * Values written to oom_adj are simply mapped linearly to oom_score_adj.
1120  * Processes that become oom disabled via oom_adj will still be oom disabled
1121  * with this implementation.
1122  *
1123  * oom_adj cannot be removed since existing userspace binaries use it.
1124  */
1125 static ssize_t oom_adj_write(struct file *file, const char __user *buf,
1126                              size_t count, loff_t *ppos)
1127 {
1128         char buffer[PROC_NUMBUF];
1129         int oom_adj;
1130         int err;
1131
1132         memset(buffer, 0, sizeof(buffer));
1133         if (count > sizeof(buffer) - 1)
1134                 count = sizeof(buffer) - 1;
1135         if (copy_from_user(buffer, buf, count)) {
1136                 err = -EFAULT;
1137                 goto out;
1138         }
1139
1140         err = kstrtoint(strstrip(buffer), 0, &oom_adj);
1141         if (err)
1142                 goto out;
1143         if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
1144              oom_adj != OOM_DISABLE) {
1145                 err = -EINVAL;
1146                 goto out;
1147         }
1148
1149         /*
1150          * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
1151          * value is always attainable.
1152          */
1153         if (oom_adj == OOM_ADJUST_MAX)
1154                 oom_adj = OOM_SCORE_ADJ_MAX;
1155         else
1156                 oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
1157
1158         err = __set_oom_adj(file, oom_adj, true);
1159 out:
1160         return err < 0 ? err : count;
1161 }
1162
/* File operations for the legacy /proc/<pid>/oom_adj. */
static const struct file_operations proc_oom_adj_operations = {
        .read           = oom_adj_read,
        .write          = oom_adj_write,
        .llseek         = generic_file_llseek,
};
1168
1169 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
1170                                         size_t count, loff_t *ppos)
1171 {
1172         struct task_struct *task = get_proc_task(file_inode(file));
1173         char buffer[PROC_NUMBUF];
1174         short oom_score_adj = OOM_SCORE_ADJ_MIN;
1175         size_t len;
1176
1177         if (!task)
1178                 return -ESRCH;
1179         oom_score_adj = task->signal->oom_score_adj;
1180         put_task_struct(task);
1181         len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
1182         return simple_read_from_buffer(buf, count, ppos, buffer, len);
1183 }
1184
1185 static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1186                                         size_t count, loff_t *ppos)
1187 {
1188         char buffer[PROC_NUMBUF];
1189         int oom_score_adj;
1190         int err;
1191
1192         memset(buffer, 0, sizeof(buffer));
1193         if (count > sizeof(buffer) - 1)
1194                 count = sizeof(buffer) - 1;
1195         if (copy_from_user(buffer, buf, count)) {
1196                 err = -EFAULT;
1197                 goto out;
1198         }
1199
1200         err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
1201         if (err)
1202                 goto out;
1203         if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
1204                         oom_score_adj > OOM_SCORE_ADJ_MAX) {
1205                 err = -EINVAL;
1206                 goto out;
1207         }
1208
1209         err = __set_oom_adj(file, oom_score_adj, false);
1210 out:
1211         return err < 0 ? err : count;
1212 }
1213
/*
 * File operations for /proc/<pid>/oom_score_adj.
 * NOTE(review): uses default_llseek while oom_adj uses generic_file_llseek —
 * looks intentional upstream, but worth confirming.
 */
static const struct file_operations proc_oom_score_adj_operations = {
        .read           = oom_score_adj_read,
        .write          = oom_score_adj_write,
        .llseek         = default_llseek,
};
1219
1220 #ifdef CONFIG_AUDITSYSCALL
1221 #define TMPBUFLEN 11
1222 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
1223                                   size_t count, loff_t *ppos)
1224 {
1225         struct inode * inode = file_inode(file);
1226         struct task_struct *task = get_proc_task(inode);
1227         ssize_t length;
1228         char tmpbuf[TMPBUFLEN];
1229
1230         if (!task)
1231                 return -ESRCH;
1232         length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1233                            from_kuid(file->f_cred->user_ns,
1234                                      audit_get_loginuid(task)));
1235         put_task_struct(task);
1236         return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1237 }
1238
1239 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1240                                    size_t count, loff_t *ppos)
1241 {
1242         struct inode * inode = file_inode(file);
1243         uid_t loginuid;
1244         kuid_t kloginuid;
1245         int rv;
1246
1247         rcu_read_lock();
1248         if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
1249                 rcu_read_unlock();
1250                 return -EPERM;
1251         }
1252         rcu_read_unlock();
1253
1254         if (*ppos != 0) {
1255                 /* No partial writes. */
1256                 return -EINVAL;
1257         }
1258
1259         rv = kstrtou32_from_user(buf, count, 10, &loginuid);
1260         if (rv < 0)
1261                 return rv;
1262
1263         /* is userspace tring to explicitly UNSET the loginuid? */
1264         if (loginuid == AUDIT_UID_UNSET) {
1265                 kloginuid = INVALID_UID;
1266         } else {
1267                 kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
1268                 if (!uid_valid(kloginuid))
1269                         return -EINVAL;
1270         }
1271
1272         rv = audit_set_loginuid(kloginuid);
1273         if (rv < 0)
1274                 return rv;
1275         return count;
1276 }
1277
/* File operations for /proc/<pid>/loginuid. */
static const struct file_operations proc_loginuid_operations = {
        .read           = proc_loginuid_read,
        .write          = proc_loginuid_write,
        .llseek         = generic_file_llseek,
};
1283
1284 static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
1285                                   size_t count, loff_t *ppos)
1286 {
1287         struct inode * inode = file_inode(file);
1288         struct task_struct *task = get_proc_task(inode);
1289         ssize_t length;
1290         char tmpbuf[TMPBUFLEN];
1291
1292         if (!task)
1293                 return -ESRCH;
1294         length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1295                                 audit_get_sessionid(task));
1296         put_task_struct(task);
1297         return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1298 }
1299
/* File operations for /proc/<pid>/sessionid (read-only). */
static const struct file_operations proc_sessionid_operations = {
        .read           = proc_sessionid_read,
        .llseek         = generic_file_llseek,
};
1304 #endif
1305
1306 #ifdef CONFIG_FAULT_INJECTION
1307 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
1308                                       size_t count, loff_t *ppos)
1309 {
1310         struct task_struct *task = get_proc_task(file_inode(file));
1311         char buffer[PROC_NUMBUF];
1312         size_t len;
1313         int make_it_fail;
1314
1315         if (!task)
1316                 return -ESRCH;
1317         make_it_fail = task->make_it_fail;
1318         put_task_struct(task);
1319
1320         len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
1321
1322         return simple_read_from_buffer(buf, count, ppos, buffer, len);
1323 }
1324
1325 static ssize_t proc_fault_inject_write(struct file * file,
1326                         const char __user * buf, size_t count, loff_t *ppos)
1327 {
1328         struct task_struct *task;
1329         char buffer[PROC_NUMBUF];
1330         int make_it_fail;
1331         int rv;
1332
1333         if (!capable(CAP_SYS_RESOURCE))
1334                 return -EPERM;
1335         memset(buffer, 0, sizeof(buffer));
1336         if (count > sizeof(buffer) - 1)
1337                 count = sizeof(buffer) - 1;
1338         if (copy_from_user(buffer, buf, count))
1339                 return -EFAULT;
1340         rv = kstrtoint(strstrip(buffer), 0, &make_it_fail);
1341         if (rv < 0)
1342                 return rv;
1343         if (make_it_fail < 0 || make_it_fail > 1)
1344                 return -EINVAL;
1345
1346         task = get_proc_task(file_inode(file));
1347         if (!task)
1348                 return -ESRCH;
1349         task->make_it_fail = make_it_fail;
1350         put_task_struct(task);
1351
1352         return count;
1353 }
1354
/* File operations for /proc/<pid>/make-it-fail. */
static const struct file_operations proc_fault_inject_operations = {
        .read           = proc_fault_inject_read,
        .write          = proc_fault_inject_write,
        .llseek         = generic_file_llseek,
};
1360
1361 static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
1362                                    size_t count, loff_t *ppos)
1363 {
1364         struct task_struct *task;
1365         int err;
1366         unsigned int n;
1367
1368         err = kstrtouint_from_user(buf, count, 0, &n);
1369         if (err)
1370                 return err;
1371
1372         task = get_proc_task(file_inode(file));
1373         if (!task)
1374                 return -ESRCH;
1375         task->fail_nth = n;
1376         put_task_struct(task);
1377
1378         return count;
1379 }
1380
1381 static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
1382                                   size_t count, loff_t *ppos)
1383 {
1384         struct task_struct *task;
1385         char numbuf[PROC_NUMBUF];
1386         ssize_t len;
1387
1388         task = get_proc_task(file_inode(file));
1389         if (!task)
1390                 return -ESRCH;
1391         len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
1392         len = simple_read_from_buffer(buf, count, ppos, numbuf, len);
1393         put_task_struct(task);
1394
1395         return len;
1396 }
1397
/*
 * File operations for /proc/<pid>/fail-nth.
 * No .llseek is set — presumably seeking is deliberately unsupported here;
 * confirm against the fault-injection documentation.
 */
static const struct file_operations proc_fail_nth_operations = {
        .read           = proc_fail_nth_read,
        .write          = proc_fail_nth_write,
};
1402 #endif
1403
1404
1405 #ifdef CONFIG_SCHED_DEBUG
1406 /*
1407  * Print out various scheduling related per-task fields:
1408  */
1409 static int sched_show(struct seq_file *m, void *v)
1410 {
1411         struct inode *inode = m->private;
1412         struct pid_namespace *ns = inode->i_sb->s_fs_info;
1413         struct task_struct *p;
1414
1415         p = get_proc_task(inode);
1416         if (!p)
1417                 return -ESRCH;
1418         proc_sched_show_task(p, ns, m);
1419
1420         put_task_struct(p);
1421
1422         return 0;
1423 }
1424
1425 static ssize_t
1426 sched_write(struct file *file, const char __user *buf,
1427             size_t count, loff_t *offset)
1428 {
1429         struct inode *inode = file_inode(file);
1430         struct task_struct *p;
1431
1432         p = get_proc_task(inode);
1433         if (!p)
1434                 return -ESRCH;
1435         proc_sched_set_task(p);
1436
1437         put_task_struct(p);
1438
1439         return count;
1440 }
1441
1442 static int sched_open(struct inode *inode, struct file *filp)
1443 {
1444         return single_open(filp, sched_show, inode);
1445 }
1446
/* File operations for /proc/<pid>/sched. */
static const struct file_operations proc_pid_sched_operations = {
        .open           = sched_open,
        .read           = seq_read,
        .write          = sched_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1454
1455 #endif
1456
1457 #ifdef CONFIG_SCHED_AUTOGROUP
1458 /*
1459  * Print out autogroup related information:
1460  */
1461 static int sched_autogroup_show(struct seq_file *m, void *v)
1462 {
1463         struct inode *inode = m->private;
1464         struct task_struct *p;
1465
1466         p = get_proc_task(inode);
1467         if (!p)
1468                 return -ESRCH;
1469         proc_sched_autogroup_show_task(p, m);
1470
1471         put_task_struct(p);
1472
1473         return 0;
1474 }
1475
1476 static ssize_t
1477 sched_autogroup_write(struct file *file, const char __user *buf,
1478             size_t count, loff_t *offset)
1479 {
1480         struct inode *inode = file_inode(file);
1481         struct task_struct *p;
1482         char buffer[PROC_NUMBUF];
1483         int nice;
1484         int err;
1485
1486         memset(buffer, 0, sizeof(buffer));
1487         if (count > sizeof(buffer) - 1)
1488                 count = sizeof(buffer) - 1;
1489         if (copy_from_user(buffer, buf, count))
1490                 return -EFAULT;
1491
1492         err = kstrtoint(strstrip(buffer), 0, &nice);
1493         if (err < 0)
1494                 return err;
1495
1496         p = get_proc_task(inode);
1497         if (!p)
1498                 return -ESRCH;
1499
1500         err = proc_sched_autogroup_set_nice(p, nice);
1501         if (err)
1502                 count = err;
1503
1504         put_task_struct(p);
1505
1506         return count;
1507 }
1508
1509 static int sched_autogroup_open(struct inode *inode, struct file *filp)
1510 {
1511         int ret;
1512
1513         ret = single_open(filp, sched_autogroup_show, NULL);
1514         if (!ret) {
1515                 struct seq_file *m = filp->private_data;
1516
1517                 m->private = inode;
1518         }
1519         return ret;
1520 }
1521
/* File operations for /proc/<pid>/autogroup. */
static const struct file_operations proc_pid_sched_autogroup_operations = {
        .open           = sched_autogroup_open,
        .read           = seq_read,
        .write          = sched_autogroup_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1529
1530 #endif /* CONFIG_SCHED_AUTOGROUP */
1531
1532 static ssize_t comm_write(struct file *file, const char __user *buf,
1533                                 size_t count, loff_t *offset)
1534 {
1535         struct inode *inode = file_inode(file);
1536         struct task_struct *p;
1537         char buffer[TASK_COMM_LEN];
1538         const size_t maxlen = sizeof(buffer) - 1;
1539
1540         memset(buffer, 0, sizeof(buffer));
1541         if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
1542                 return -EFAULT;
1543
1544         p = get_proc_task(inode);
1545         if (!p)
1546                 return -ESRCH;
1547
1548         if (same_thread_group(current, p))
1549                 set_task_comm(p, buffer);
1550         else
1551                 count = -EINVAL;
1552
1553         put_task_struct(p);
1554
1555         return count;
1556 }
1557
1558 static int comm_show(struct seq_file *m, void *v)
1559 {
1560         struct inode *inode = m->private;
1561         struct task_struct *p;
1562
1563         p = get_proc_task(inode);
1564         if (!p)
1565                 return -ESRCH;
1566
1567         task_lock(p);
1568         seq_printf(m, "%s\n", p->comm);
1569         task_unlock(p);
1570
1571         put_task_struct(p);
1572
1573         return 0;
1574 }
1575
1576 static int comm_open(struct inode *inode, struct file *filp)
1577 {
1578         return single_open(filp, comm_show, inode);
1579 }
1580
/* File operations for /proc/<pid>/comm. */
static const struct file_operations proc_pid_set_comm_operations = {
        .open           = comm_open,
        .read           = seq_read,
        .write          = comm_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1588
1589 static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1590 {
1591         struct task_struct *task;
1592         struct file *exe_file;
1593
1594         task = get_proc_task(d_inode(dentry));
1595         if (!task)
1596                 return -ENOENT;
1597         exe_file = get_task_exe_file(task);
1598         put_task_struct(task);
1599         if (exe_file) {
1600                 *exe_path = exe_file->f_path;
1601                 path_get(&exe_file->f_path);
1602                 fput(exe_file);
1603                 return 0;
1604         } else
1605                 return -ENOENT;
1606 }
1607
1608 static const char *proc_pid_get_link(struct dentry *dentry,
1609                                      struct inode *inode,
1610                                      struct delayed_call *done)
1611 {
1612         struct path path;
1613         int error = -EACCES;
1614
1615         if (!dentry)
1616                 return ERR_PTR(-ECHILD);
1617
1618         /* Are we allowed to snoop on the tasks file descriptors? */
1619         if (!proc_fd_access_allowed(inode))
1620                 goto out;
1621
1622         error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1623         if (error)
1624                 goto out;
1625
1626         nd_jump_link(&path);
1627         return NULL;
1628 out:
1629         return ERR_PTR(error);
1630 }
1631
1632 static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1633 {
1634         char *tmp = (char *)__get_free_page(GFP_KERNEL);
1635         char *pathname;
1636         int len;
1637
1638         if (!tmp)
1639                 return -ENOMEM;
1640
1641         pathname = d_path(path, tmp, PAGE_SIZE);
1642         len = PTR_ERR(pathname);
1643         if (IS_ERR(pathname))
1644                 goto out;
1645         len = tmp + PAGE_SIZE - 1 - pathname;
1646
1647         if (len > buflen)
1648                 len = buflen;
1649         if (copy_to_user(buffer, pathname, len))
1650                 len = -EFAULT;
1651  out:
1652         free_page((unsigned long)tmp);
1653         return len;
1654 }
1655
1656 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1657 {
1658         int error = -EACCES;
1659         struct inode *inode = d_inode(dentry);
1660         struct path path;
1661
1662         /* Are we allowed to snoop on the tasks file descriptors? */
1663         if (!proc_fd_access_allowed(inode))
1664                 goto out;
1665
1666         error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1667         if (error)
1668                 goto out;
1669
1670         error = do_proc_readlink(&path, buffer, buflen);
1671         path_put(&path);
1672 out:
1673         return error;
1674 }
1675
/* Inode operations shared by the per-task symlinks (exe, cwd, root, fd/N). */
const struct inode_operations proc_pid_link_inode_operations = {
        .readlink       = proc_pid_readlink,
        .get_link       = proc_pid_get_link,
        .setattr        = proc_setattr,
};
1681
1682
1683 /* building an inode */
1684
/*
 * task_dump_owner - compute the uid/gid that should own a task's proc file
 * @task: task the proc file describes
 * @mode: file mode of the proc entry being created/revalidated
 * @ruid: out — resulting owner uid
 * @rgid: out — resulting owner gid
 *
 * Non-dumpable tasks get root-owned files so their state can't be read
 * by the (possibly compromised) task's own user.
 */
void task_dump_owner(struct task_struct *task, umode_t mode,
                     kuid_t *ruid, kgid_t *rgid)
{
        /* Depending on the state of dumpable compute who should own a
         * proc file for a task.
         */
        const struct cred *cred;
        kuid_t uid;
        kgid_t gid;

        /* Default to the tasks effective ownership */
        rcu_read_lock();
        cred = __task_cred(task);
        uid = cred->euid;
        gid = cred->egid;
        rcu_read_unlock();

        /*
         * Before the /proc/pid/status file was created the only way to read
         * the effective uid of a /process was to stat /proc/pid.  Reading
         * /proc/pid/status is slow enough that procps and other packages
         * kept stating /proc/pid.  To keep the rules in /proc simple I have
         * made this apply to all per process world readable and executable
         * directories.
         */
        if (mode != (S_IFDIR|S_IRUGO|S_IXUGO)) {
                struct mm_struct *mm;
                task_lock(task);
                mm = task->mm;
                /* Make non-dumpable tasks owned by some root */
                if (mm) {
                        if (get_dumpable(mm) != SUID_DUMP_USER) {
                                /* Map root inside the mm's user namespace. */
                                struct user_namespace *user_ns = mm->user_ns;

                                uid = make_kuid(user_ns, 0);
                                if (!uid_valid(uid))
                                        uid = GLOBAL_ROOT_UID;

                                gid = make_kgid(user_ns, 0);
                                if (!gid_valid(gid))
                                        gid = GLOBAL_ROOT_GID;
                        }
                } else {
                        /* No mm (kernel thread or exiting task): global root. */
                        uid = GLOBAL_ROOT_UID;
                        gid = GLOBAL_ROOT_GID;
                }
                task_unlock(task);
        }
        *ruid = uid;
        *rgid = gid;
}
1736
/*
 * proc_pid_make_inode - allocate and initialize a per-task proc inode
 * @sb:   the procfs superblock
 * @task: task the inode will describe
 * @mode: file mode for the new inode
 *
 * The inode pins a reference to the task's struct pid (ei->pid).
 * Returns the new inode, or NULL on allocation/pid failure.
 */
struct inode *proc_pid_make_inode(struct super_block * sb,
                                  struct task_struct *task, umode_t mode)
{
        struct inode * inode;
        struct proc_inode *ei;

        /* We need a new inode */

        inode = new_inode(sb);
        if (!inode)
                goto out;

        /* Common stuff */
        ei = PROC_I(inode);
        inode->i_mode = mode;
        inode->i_ino = get_next_ino();
        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
        inode->i_op = &proc_def_inode_operations;

        /*
         * grab the reference to task.
         */
        ei->pid = get_task_pid(task, PIDTYPE_PID);
        if (!ei->pid)
                goto out_unlock;

        /* Ownership depends on the task's dumpable state. */
        task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
        security_task_to_inode(task, inode);

out:
        return inode;

out_unlock:
        iput(inode);
        return NULL;
}
1773
/*
 * getattr for per-task proc entries.  Recomputes ownership on every call
 * because the task may have changed credentials since the inode was made,
 * and enforces hidepid visibility rules.
 */
int pid_getattr(const struct path *path, struct kstat *stat,
                u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct task_struct *task;
        struct pid_namespace *pid = path->dentry->d_sb->s_fs_info;

        generic_fillattr(inode, stat);

        rcu_read_lock();
        /* Default ownership for entries whose task has already exited. */
        stat->uid = GLOBAL_ROOT_UID;
        stat->gid = GLOBAL_ROOT_GID;
        task = pid_task(proc_pid(inode), PIDTYPE_PID);
        if (task) {
                if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
                        rcu_read_unlock();
                        /*
                         * This doesn't prevent learning whether PID exists,
                         * it only makes getattr() consistent with readdir().
                         */
                        return -ENOENT;
                }
                task_dump_owner(task, inode->i_mode, &stat->uid, &stat->gid);
        }
        rcu_read_unlock();
        return 0;
}
1801
1802 /* dentry stuff */
1803
1804 /*
1805  *      Exceptional case: normally we are not allowed to unhash a busy
1806  * directory. In this case, however, we can do it - no aliasing problems
1807  * due to the way we treat inodes.
1808  *
1809  * Rewrite the inode's ownerships here because the owning task may have
1810  * performed a setuid(), etc.
1811  *
1812  */
/*
 * d_revalidate for per-task dentries.  Refreshes the inode's ownership
 * (the task may have changed credentials) and reports the dentry stale
 * once the task is gone.  Returns 1 if still valid, 0 if not.
 */
int pid_revalidate(struct dentry *dentry, unsigned int flags)
{
        struct inode *inode;
        struct task_struct *task;

        /* get_proc_task() takes references; can't do that in RCU-walk. */
        if (flags & LOOKUP_RCU)
                return -ECHILD;

        inode = d_inode(dentry);
        task = get_proc_task(inode);

        if (task) {
                task_dump_owner(task, inode->i_mode, &inode->i_uid, &inode->i_gid);

                /* Never leave setuid/setgid bits on a proc inode. */
                inode->i_mode &= ~(S_ISUID | S_ISGID);
                security_task_to_inode(task, inode);
                put_task_struct(task);
                return 1;
        }
        return 0;
}
1834
1835 static inline bool proc_inode_is_dead(struct inode *inode)
1836 {
1837         return !proc_pid(inode)->tasks[PIDTYPE_PID].first;
1838 }
1839
/*
 * d_delete callback: decide whether to keep the dentry cached at dput()
 * time.  If the task this dentry represents has died there is no point
 * putting it on the LRU list, so ask the dcache to kill it right away.
 */
int pid_delete_dentry(const struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	return proc_inode_is_dead(inode);
}
1848
/* Dentry ops shared by per-pid entries: refresh ownership on lookup and
 * drop dentries of dead tasks eagerly. */
const struct dentry_operations pid_dentry_operations =
{
	.d_revalidate	= pid_revalidate,
	.d_delete	= pid_delete_dentry,
};
1854
1855 /* Lookups */
1856
1857 /*
1858  * Fill a directory entry.
1859  *
1860  * If possible create the dcache entry and derive our inode number and
1861  * file type from dcache entry.
1862  *
1863  * Since all of the proc inode numbers are dynamically generated, the inode
 *  numbers do not exist until the inode is cached.  This means creating
 *  the dcache entry in readdir is necessary to keep the inode numbers
1866  * reported by readdir in sync with the inode numbers reported
1867  * by stat.
1868  */
bool proc_fill_cache(struct file *file, struct dir_context *ctx,
	const char *name, int len,
	instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
	struct dentry *child, *dir = file->f_path.dentry;
	struct qstr qname = QSTR_INIT(name, len);
	struct inode *inode;
	unsigned type;
	ino_t ino;

	/* Fast path: the dentry may already be in the dcache. */
	child = d_hash_and_lookup(dir, &qname);
	if (!child) {
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
		/* Allocate an in-lookup dentry (or wait for a racing one). */
		child = d_alloc_parallel(dir, &qname, &wq);
		if (IS_ERR(child))
			goto end_instantiate;
		if (d_in_lookup(child)) {
			/* We own the lookup: populate the new dentry. */
			int err = instantiate(d_inode(dir), child, task, ptr);
			d_lookup_done(child);
			if (err < 0) {
				dput(child);
				goto end_instantiate;
			}
		}
	}
	inode = d_inode(child);
	ino = inode->i_ino;
	type = inode->i_mode >> 12;	/* format bits of i_mode == DT_* */
	dput(child);
	return dir_emit(ctx, name, len, ino, type);

end_instantiate:
	/* Could not instantiate: emit a placeholder (ino 1, unknown type). */
	return dir_emit(ctx, name, len, 1, DT_UNKNOWN);
}
1903
1904 /*
1905  * dname_to_vma_addr - maps a dentry name into two unsigned longs
1906  * which represent vma start and end addresses.
1907  */
static int dname_to_vma_addr(struct dentry *dentry,
			     unsigned long *start, unsigned long *end)
{
	const char *str = dentry->d_name.name;
	unsigned long long sval, eval;
	unsigned int len;

	/* Parse the start address as hex, rejecting overflow and values
	 * that don't fit in unsigned long. */
	len = _parse_integer(str, 16, &sval);
	if (len & KSTRTOX_OVERFLOW)
		return -EINVAL;
	if (sval != (unsigned long)sval)
		return -EINVAL;
	str += len;

	/* The two addresses are separated by exactly one '-'. */
	if (*str != '-')
		return -EINVAL;
	str++;

	/* Parse the end address the same way. */
	len = _parse_integer(str, 16, &eval);
	if (len & KSTRTOX_OVERFLOW)
		return -EINVAL;
	if (eval != (unsigned long)eval)
		return -EINVAL;
	str += len;

	/* Reject any trailing garbage after the second number. */
	if (*str != '\0')
		return -EINVAL;

	*start = sval;
	*end = eval;

	return 0;
}
1941
static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	bool exact_vma_exists = false;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct inode *inode;
	int status = 0;

	/* We take sleeping locks below; cannot do that in RCU-walk. */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	task = get_proc_task(inode);
	if (!task)
		goto out_notask;

	/* Needs ptrace-read access to the target's mm. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm))
		goto out;

	/* The dentry name encodes "start-end"; it is valid only while a
	 * VMA with exactly those bounds still exists. */
	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
		down_read(&mm->mmap_sem);
		exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
		up_read(&mm->mmap_sem);
	}

	mmput(mm);

	if (exact_vma_exists) {
		task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);

		security_task_to_inode(task, inode);
		status = 1;
	}

out:
	put_task_struct(task);

out_notask:
	return status;
}
1984
/* map_files dentries: revalidate against the exact VMA; drop when dead. */
static const struct dentry_operations tid_map_files_dentry_operations = {
	.d_revalidate	= map_files_d_revalidate,
	.d_delete	= pid_delete_dentry,
};
1989
/* Resolve a map_files/<start>-<end> link to the path of the mapped file. */
static int map_files_get_link(struct dentry *dentry, struct path *path)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	int rc;

	rc = -ENOENT;
	task = get_proc_task(d_inode(dentry));
	if (!task)
		goto out;

	/* We only need the mm from here on; drop the task ref early. */
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	/* Decode "start-end" from the dentry name. */
	rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
	if (rc)
		goto out_mmput;

	rc = -ENOENT;
	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (vma && vma->vm_file) {
		/* Take a path reference before dropping mmap_sem. */
		*path = vma->vm_file->f_path;
		path_get(path);
		rc = 0;
	}
	up_read(&mm->mmap_sem);

out_mmput:
	mmput(mm);
out:
	return rc;
}
2027
/* Snapshot of one map_files entry, collected under mmap_sem and emitted
 * after the lock is dropped (see proc_map_files_readdir()). */
struct map_files_info {
	fmode_t		mode;	/* f_mode of the mapped file */
	unsigned int	len;	/* strlen of name[] */
	unsigned char	name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
};
2033
2034 /*
2035  * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
2036  * symlinks may be used to bypass permissions on ancestor directories in the
2037  * path to the file in question.
2038  */
2039 static const char *
2040 proc_map_files_get_link(struct dentry *dentry,
2041                         struct inode *inode,
2042                         struct delayed_call *done)
2043 {
2044         if (!capable(CAP_SYS_ADMIN))
2045                 return ERR_PTR(-EPERM);
2046
2047         return proc_pid_get_link(dentry, inode, done);
2048 }
2049
2050 /*
2051  * Identical to proc_pid_link_inode_operations except for get_link()
2052  */
static const struct inode_operations proc_map_files_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.get_link	= proc_map_files_get_link,	/* CAP_SYS_ADMIN gated */
	.setattr	= proc_setattr,
};
2058
static int
proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
			   struct task_struct *task, const void *ptr)
{
	/* ptr smuggles the mapped file's f_mode through instantiate_t. */
	fmode_t mode = (fmode_t)(unsigned long)ptr;
	struct proc_inode *ei;
	struct inode *inode;

	/* Symlink is owner-readable/writable iff the mapping was. */
	inode = proc_pid_make_inode(dir->i_sb, task, S_IFLNK |
				    ((mode & FMODE_READ ) ? S_IRUSR : 0) |
				    ((mode & FMODE_WRITE) ? S_IWUSR : 0));
	if (!inode)
		return -ENOENT;

	ei = PROC_I(inode);
	ei->op.proc_get_link = map_files_get_link;

	inode->i_op = &proc_map_files_link_inode_operations;
	inode->i_size = 64;

	d_set_d_op(dentry, &tid_map_files_dentry_operations);
	d_add(dentry, inode);

	return 0;
}
2084
/* Lookup one /proc/<pid>/map_files/<start>-<end> entry. */
static struct dentry *proc_map_files_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	int result;
	struct mm_struct *mm;

	result = -ENOENT;
	task = get_proc_task(dir);
	if (!task)
		goto out;

	/* Entries leak mapping layout: require ptrace-read access. */
	result = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	result = -ENOENT;
	if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;

	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (!vma)
		goto out_no_vma;

	/* Only file-backed mappings have entries here. */
	if (vma->vm_file)
		result = proc_map_files_instantiate(dir, dentry, task,
				(void *)(unsigned long)vma->vm_file->f_mode);

out_no_vma:
	up_read(&mm->mmap_sem);
	mmput(mm);
out_put_task:
	put_task_struct(task);
out:
	return ERR_PTR(result);
}
2128
/* Inode ops for the /proc/<pid>/map_files directory itself. */
static const struct inode_operations proc_map_files_inode_operations = {
	.lookup		= proc_map_files_lookup,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};
2134
/* readdir for /proc/<pid>/map_files: one entry per file-backed VMA. */
static int
proc_map_files_readdir(struct file *file, struct dir_context *ctx)
{
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long nr_files, pos, i;
	struct flex_array *fa = NULL;
	struct map_files_info info;
	struct map_files_info *p;
	int ret;

	ret = -ENOENT;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	ret = 0;
	if (!dir_emit_dots(file, ctx))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;
	down_read(&mm->mmap_sem);

	nr_files = 0;

	/*
	 * We need two passes here:
	 *
	 *  1) Collect vmas of mapped files with mmap_sem taken
	 *  2) Release mmap_sem and instantiate entries
	 *
	 * otherwise lockdep complains, since filldir()
	 * routine might require mmap_sem taken in might_fault().
	 */

	/* Pass 1a: count entries at or past ctx->pos ("." and ".." occupy
	 * positions 0 and 1, so file entries start at 2). */
	for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
		if (vma->vm_file && ++pos > ctx->pos)
			nr_files++;
	}

	if (nr_files) {
		fa = flex_array_alloc(sizeof(info), nr_files,
					GFP_KERNEL);
		if (!fa || flex_array_prealloc(fa, 0, nr_files,
						GFP_KERNEL)) {
			ret = -ENOMEM;
			if (fa)
				flex_array_free(fa);
			up_read(&mm->mmap_sem);
			mmput(mm);
			goto out_put_task;
		}
		/* Pass 1b: snapshot name and f_mode of each entry. */
		for (i = 0, vma = mm->mmap, pos = 2; vma;
				vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (++pos <= ctx->pos)
				continue;

			info.mode = vma->vm_file->f_mode;
			info.len = snprintf(info.name,
					sizeof(info.name), "%lx-%lx",
					vma->vm_start, vma->vm_end);
			if (flex_array_put(fa, i++, &info, GFP_KERNEL))
				BUG();
		}
	}
	up_read(&mm->mmap_sem);

	/* Pass 2: emit snapshotted entries without mmap_sem held. */
	for (i = 0; i < nr_files; i++) {
		p = flex_array_get(fa, i);
		if (!proc_fill_cache(file, ctx,
				      p->name, p->len,
				      proc_map_files_instantiate,
				      task,
				      (void *)(unsigned long)p->mode))
			break;
		ctx->pos++;
	}
	if (fa)
		flex_array_free(fa);
	mmput(mm);

out_put_task:
	put_task_struct(task);
out:
	return ret;
}
2230
/* File ops for the /proc/<pid>/map_files directory. */
static const struct file_operations proc_map_files_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_map_files_readdir,
	.llseek		= generic_file_llseek,
};
2236
2237 #if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
/* Per-open state for /proc/<pid>/timers. */
struct timers_private {
	struct pid *pid;	/* target pid, set at open time */
	struct task_struct *task;	/* set by timers_start() */
	struct sighand_struct *sighand;	/* non-NULL while siglock is held */
	struct pid_namespace *ns;	/* pid namespace of the proc mount */
	unsigned long flags;	/* saved irq flags for siglock */
};
2245
static void *timers_start(struct seq_file *m, loff_t *pos)
{
	struct timers_private *tp = m->private;

	tp->task = get_pid_task(tp->pid, PIDTYPE_PID);
	if (!tp->task)
		return ERR_PTR(-ESRCH);

	/* Hold sighand->siglock across the whole traversal.  It is
	 * released in timers_stop(), which the seq_file core calls even
	 * when start fails, so no cleanup is needed on error here. */
	tp->sighand = lock_task_sighand(tp->task, &tp->flags);
	if (!tp->sighand)
		return ERR_PTR(-ESRCH);

	return seq_list_start(&tp->task->signal->posix_timers, *pos);
}
2260
2261 static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
2262 {
2263         struct timers_private *tp = m->private;
2264         return seq_list_next(v, &tp->task->signal->posix_timers, pos);
2265 }
2266
/* Undo timers_start().  Called by the seq_file core even after a failed
 * start, so both fields are checked before being released. */
static void timers_stop(struct seq_file *m, void *v)
{
	struct timers_private *tp = m->private;

	if (tp->sighand) {
		unlock_task_sighand(tp->task, &tp->flags);
		tp->sighand = NULL;
	}

	if (tp->task) {
		put_task_struct(tp->task);
		tp->task = NULL;
	}
}
2281
/* Print one posix timer entry; called with siglock held (timers_start). */
static int show_timer(struct seq_file *m, void *v)
{
	struct k_itimer *timer;
	struct timers_private *tp = m->private;
	int notify;
	/* Names for the SIGEV_* base modes; SIGEV_THREAD_ID is a flag
	 * handled separately below. */
	static const char * const nstr[] = {
		[SIGEV_SIGNAL] = "signal",
		[SIGEV_NONE] = "none",
		[SIGEV_THREAD] = "thread",
	};

	timer = list_entry((struct list_head *)v, struct k_itimer, list);
	notify = timer->it_sigev_notify;

	seq_printf(m, "ID: %d\n", timer->it_id);
	/* %px deliberately prints the unhashed sival_ptr value. */
	seq_printf(m, "signal: %d/%px\n",
		   timer->sigq->info.si_signo,
		   timer->sigq->info.si_value.sival_ptr);
	seq_printf(m, "notify: %s/%s.%d\n",
		   nstr[notify & ~SIGEV_THREAD_ID],
		   (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
		   pid_nr_ns(timer->it_pid, tp->ns));
	seq_printf(m, "ClockID: %d\n", timer->it_clock);

	return 0;
}
2308
/* seq_file iterator over the task's posix timers list. */
static const struct seq_operations proc_timers_seq_ops = {
	.start	= timers_start,
	.next	= timers_next,
	.stop	= timers_stop,
	.show	= show_timer,
};
2315
static int proc_timers_open(struct inode *inode, struct file *file)
{
	struct timers_private *tp;

	tp = __seq_open_private(file, &proc_timers_seq_ops,
			sizeof(struct timers_private));
	if (!tp)
		return -ENOMEM;

	tp->pid = proc_pid(inode);
	tp->ns = inode->i_sb->s_fs_info;	/* pid ns of this proc mount */
	return 0;
}
2329
/* File ops for /proc/<pid>/timers. */
static const struct file_operations proc_timers_operations = {
	.open		= proc_timers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2336 #endif
2337
static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	u64 slack_ns;
	int err;

	err = kstrtoull_from_user(buf, count, 10, &slack_ns);
	if (err < 0)
		return err;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (p != current) {
		/* Changing another task's slack needs CAP_SYS_NICE ... */
		if (!capable(CAP_SYS_NICE)) {
			count = -EPERM;	/* count doubles as return value */
			goto out;
		}

		/* ... and the security module's approval. */
		err = security_task_setscheduler(p);
		if (err) {
			count = err;
			goto out;
		}
	}

	task_lock(p);
	/* Writing 0 resets the slack to the task's default. */
	if (slack_ns == 0)
		p->timer_slack_ns = p->default_timer_slack_ns;
	else
		p->timer_slack_ns = slack_ns;
	task_unlock(p);

out:
	put_task_struct(p);

	return count;
}
2379
/* Show the task's current timer slack; access-checked like the writer. */
static int timerslack_ns_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;
	int err = 0;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (p != current) {

		/* Reading another task's slack needs CAP_SYS_NICE ... */
		if (!capable(CAP_SYS_NICE)) {
			err = -EPERM;
			goto out;
		}
		/* ... and the security module's approval. */
		err = security_task_getscheduler(p);
		if (err)
			goto out;
	}

	task_lock(p);
	seq_printf(m, "%llu\n", p->timer_slack_ns);
	task_unlock(p);

out:
	put_task_struct(p);

	return err;
}
2410
/* single_open wrapper; the inode is passed through as seq private data. */
static int timerslack_ns_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, timerslack_ns_show, inode);
}
2415
/* File ops for /proc/<pid>/timerslack_ns. */
static const struct file_operations proc_pid_set_timerslack_ns_operations = {
	.open		= timerslack_ns_open,
	.read		= seq_read,
	.write		= timerslack_ns_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2423
/* Build inode + dentry for one static /proc/<pid>/ entry described by a
 * struct pid_entry. */
static int proc_pident_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;

	inode = proc_pid_make_inode(dir->i_sb, task, p->mode);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	if (S_ISDIR(inode->i_mode))
		set_nlink(inode, 2);	/* Use getattr to fix if necessary */
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	d_set_d_op(dentry, &pid_dentry_operations);
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}
2451
/* Look up @dentry in a static table of /proc/<pid>/ entries. */
static struct dentry *proc_pident_lookup(struct inode *dir, 
					 struct dentry *dentry,
					 const struct pid_entry *ents,
					 unsigned int nents)
{
	int error;
	struct task_struct *task = get_proc_task(dir);
	const struct pid_entry *p, *last;

	error = -ENOENT;

	if (!task)
		goto out_no_task;

	/*
	 * Yes, it does not scale. And it should not. Don't add
	 * new entries into /proc/<tgid>/ without very good reasons.
	 */
	last = &ents[nents];
	for (p = ents; p < last; p++) {
		if (p->len != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, p->name, p->len))
			break;
	}
	/* p == last means no table entry matched the name. */
	if (p >= last)
		goto out;

	error = proc_pident_instantiate(dir, dentry, task, p);
out:
	put_task_struct(task);
out_no_task:
	return ERR_PTR(error);
}
2486
/* Shared readdir for the static tables of /proc/<pid>/ entries. */
static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
		const struct pid_entry *ents, unsigned int nents)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	const struct pid_entry *p;

	if (!task)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;

	/* Positions 0 and 1 are "." and ".."; table entries start at 2. */
	if (ctx->pos >= nents + 2)
		goto out;

	for (p = ents + (ctx->pos - 2); p < ents + nents; p++) {
		if (!proc_fill_cache(file, ctx, p->name, p->len,
				proc_pident_instantiate, task, p))
			break;
		ctx->pos++;
	}
out:
	put_task_struct(task);
	return 0;
}
2512
2513 #ifdef CONFIG_SECURITY
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
				  size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	char *p = NULL;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;

	/* The attribute name is the file's name (e.g. "current"); the LSM
	 * allocates the result buffer into p. */
	length = security_getprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      &p);
	put_task_struct(task);
	if (length > 0)
		length = simple_read_from_buffer(buf, count, ppos, p, length);
	kfree(p);	/* kfree(NULL) is a no-op on the error paths */
	return length;
}
2534
static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
				   size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	void *page;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	length = -ESRCH;
	if (!task)
		goto out_no_task;

	/* A task may only write its own attributes. */
	length = -EACCES;
	if (current != task)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* No partial writes. */
	length = -EINVAL;
	if (*ppos != 0)
		goto out;

	page = memdup_user(buf, count);
	if (IS_ERR(page)) {
		length = PTR_ERR(page);
		goto out;
	}

	/* Guard against adverse ptrace interaction */
	length = mutex_lock_interruptible(&current->signal->cred_guard_mutex);
	if (length < 0)
		goto out_free;

	/* The attribute name is the file's name (e.g. "exec"). */
	length = security_setprocattr(file->f_path.dentry->d_name.name,
				      page, count);
	mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	kfree(page);
out:
	put_task_struct(task);
out_no_task:
	return length;
}
2581
/* File ops shared by all /proc/<pid>/attr/ entries. */
static const struct file_operations proc_pid_attr_operations = {
	.read		= proc_pid_attr_read,
	.write		= proc_pid_attr_write,
	.llseek		= generic_file_llseek,
};
2587
/* Static table of /proc/<pid>/attr/ entries (LSM process attributes). */
static const struct pid_entry attr_dir_stuff[] = {
	REG("current",    S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("prev",       S_IRUGO,         proc_pid_attr_operations),
	REG("exec",       S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("fscreate",   S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("keycreate",  S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
};
2596
/* readdir for /proc/<pid>/attr: just walks the static table above. */
static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx, 
				   attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}
2602
/* File ops for the /proc/<pid>/attr directory. */
static const struct file_operations proc_attr_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_attr_dir_readdir,
	.llseek		= generic_file_llseek,
};
2608
/* Lookup in /proc/<pid>/attr via the shared static-table lookup helper. */
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
				struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}
2615
/* Inode ops for the /proc/<pid>/attr directory. */
static const struct inode_operations proc_attr_dir_inode_operations = {
	.lookup		= proc_attr_dir_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
2621
2622 #endif
2623
2624 #ifdef CONFIG_ELF_CORE
/* Read /proc/<pid>/coredump_filter: the MMF_DUMP_* bits in hex. */
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
					 size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	char buffer[PROC_NUMBUF];
	size_t len;
	int ret;

	if (!task)
		return -ESRCH;

	ret = 0;	/* task without an mm reads as EOF */
	mm = get_task_mm(task);
	if (mm) {
		len = snprintf(buffer, sizeof(buffer), "%08lx\n",
			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
				MMF_DUMP_FILTER_SHIFT));
		mmput(mm);
		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
	}

	put_task_struct(task);

	return ret;
}
2651
static ssize_t proc_coredump_filter_write(struct file *file,
					  const char __user *buf,
					  size_t count,
					  loff_t *ppos)
{
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int val;
	int ret;
	int i;
	unsigned long mask;

	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret < 0)
		return ret;

	ret = -ESRCH;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out_no_task;

	/* No mm (e.g. zombie): fall through with -ESRCH still set. */
	mm = get_task_mm(task);
	if (!mm)
		goto out_no_mm;
	ret = 0;

	/* Copy each bit of val into the MMF_DUMP_FILTER region of
	 * mm->flags; set_bit/clear_bit keep each update atomic. */
	for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
		if (val & mask)
			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
		else
			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
	}

	mmput(mm);
 out_no_mm:
	put_task_struct(task);
 out_no_task:
	if (ret < 0)
		return ret;
	return count;	/* success: the whole write was consumed */
}
2693
/* File ops for /proc/<pid>/coredump_filter. */
static const struct file_operations proc_coredump_filter_operations = {
	.read		= proc_coredump_filter_read,
	.write		= proc_coredump_filter_write,
	.llseek		= generic_file_llseek,
};
2699 #endif
2700
2701 #ifdef CONFIG_TASK_IO_ACCOUNTING
/*
 * Common code for /proc/<pid>/io.  If @whole is non-zero, the counters
 * of all live threads plus the totals accumulated in signal->ioac for
 * already-exited threads are summed; otherwise only @task's own counters
 * are reported.
 */
static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
{
	struct task_io_accounting acct = task->ioac;
	unsigned long flags;
	int result;

	/* Serialize against credential changes (e.g. exec). */
	result = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (result)
		return result;

	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		result = -EACCES;
		goto out_unlock;
	}

	if (whole && lock_task_sighand(task, &flags)) {
		struct task_struct *t = task;

		/* signal->ioac holds the totals of exited threads. */
		task_io_accounting_add(&acct, &task->signal->ioac);
		while_each_thread(task, t)
			task_io_accounting_add(&acct, &t->ioac);

		unlock_task_sighand(task, &flags);
	}
	seq_printf(m,
		   "rchar: %llu\n"
		   "wchar: %llu\n"
		   "syscr: %llu\n"
		   "syscw: %llu\n"
		   "read_bytes: %llu\n"
		   "write_bytes: %llu\n"
		   "cancelled_write_bytes: %llu\n",
		   (unsigned long long)acct.rchar,
		   (unsigned long long)acct.wchar,
		   (unsigned long long)acct.syscr,
		   (unsigned long long)acct.syscw,
		   (unsigned long long)acct.read_bytes,
		   (unsigned long long)acct.write_bytes,
		   (unsigned long long)acct.cancelled_write_bytes);
	result = 0;

out_unlock:
	mutex_unlock(&task->signal->cred_guard_mutex);
	return result;
}
2747
/* /proc/<tid>/io: counters for this thread only. */
static int proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				  struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 0);
}
2753
/* /proc/<tgid>/io: counters summed over the whole thread group. */
static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				   struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 1);
}
2759 #endif /* CONFIG_TASK_IO_ACCOUNTING */
2760
2761 #ifdef CONFIG_USER_NS
2762 static int proc_id_map_open(struct inode *inode, struct file *file,
2763         const struct seq_operations *seq_ops)
2764 {
2765         struct user_namespace *ns = NULL;
2766         struct task_struct *task;
2767         struct seq_file *seq;
2768         int ret = -EINVAL;
2769
2770         task = get_proc_task(inode);
2771         if (task) {
2772                 rcu_read_lock();
2773                 ns = get_user_ns(task_cred_xxx(task, user_ns));
2774                 rcu_read_unlock();
2775                 put_task_struct(task);
2776         }
2777         if (!ns)
2778                 goto err;
2779
2780         ret = seq_open(file, seq_ops);
2781         if (ret)
2782                 goto err_put_ns;
2783
2784         seq = file->private_data;
2785         seq->private = ns;
2786
2787         return 0;
2788 err_put_ns:
2789         put_user_ns(ns);
2790 err:
2791         return ret;
2792 }
2793
2794 static int proc_id_map_release(struct inode *inode, struct file *file)
2795 {
2796         struct seq_file *seq = file->private_data;
2797         struct user_namespace *ns = seq->private;
2798         put_user_ns(ns);
2799         return seq_release(inode, file);
2800 }
2801
/* Open /proc/<pid>/uid_map using the uid-map seq operations. */
static int proc_uid_map_open(struct inode *inode, struct file *file)
{
	return proc_id_map_open(inode, file, &proc_uid_seq_operations);
}
2806
/* Open /proc/<pid>/gid_map using the gid-map seq operations. */
static int proc_gid_map_open(struct inode *inode, struct file *file)
{
	return proc_id_map_open(inode, file, &proc_gid_seq_operations);
}
2811
/* Open /proc/<pid>/projid_map using the projid-map seq operations. */
static int proc_projid_map_open(struct inode *inode, struct file *file)
{
	return proc_id_map_open(inode, file, &proc_projid_seq_operations);
}
2816
/* File operations for /proc/<pid>/uid_map. */
static const struct file_operations proc_uid_map_operations = {
	.open		= proc_uid_map_open,
	.write		= proc_uid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2824
/* File operations for /proc/<pid>/gid_map. */
static const struct file_operations proc_gid_map_operations = {
	.open		= proc_gid_map_open,
	.write		= proc_gid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2832
/* File operations for /proc/<pid>/projid_map. */
static const struct file_operations proc_projid_map_operations = {
	.open		= proc_projid_map_open,
	.write		= proc_projid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2840
2841 static int proc_setgroups_open(struct inode *inode, struct file *file)
2842 {
2843         struct user_namespace *ns = NULL;
2844         struct task_struct *task;
2845         int ret;
2846
2847         ret = -ESRCH;
2848         task = get_proc_task(inode);
2849         if (task) {
2850                 rcu_read_lock();
2851                 ns = get_user_ns(task_cred_xxx(task, user_ns));
2852                 rcu_read_unlock();
2853                 put_task_struct(task);
2854         }
2855         if (!ns)
2856                 goto err;
2857
2858         if (file->f_mode & FMODE_WRITE) {
2859                 ret = -EACCES;
2860                 if (!ns_capable(ns, CAP_SYS_ADMIN))
2861                         goto err_put_ns;
2862         }
2863
2864         ret = single_open(file, &proc_setgroups_show, ns);
2865         if (ret)
2866                 goto err_put_ns;
2867
2868         return 0;
2869 err_put_ns:
2870         put_user_ns(ns);
2871 err:
2872         return ret;
2873 }
2874
/*
 * Release for /proc/<pid>/setgroups.  The namespace pointer must be
 * fetched before single_release() frees the seq_file that holds it;
 * only then can our reference be dropped.
 */
static int proc_setgroups_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	int ret = single_release(inode, file);
	put_user_ns(ns);
	return ret;
}
2883
/* File operations for /proc/<pid>/setgroups. */
static const struct file_operations proc_setgroups_operations = {
	.open		= proc_setgroups_open,
	.write		= proc_setgroups_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_setgroups_release,
};
2891 #endif /* CONFIG_USER_NS */
2892
2893 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2894                                 struct pid *pid, struct task_struct *task)
2895 {
2896         int err = lock_trace(task);
2897         if (!err) {
2898                 seq_printf(m, "%08x\n", task->personality);
2899                 unlock_trace(task);
2900         }
2901         return err;
2902 }
2903
2904 #ifdef CONFIG_LIVEPATCH
/* Show /proc/<pid>/patch_state: the task's livepatch transition state. */
static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
				struct pid *pid, struct task_struct *task)
{
	seq_printf(m, "%d\n", task->patch_state);
	return 0;
}
2911 #endif /* CONFIG_LIVEPATCH */
2912
2913 /*
2914  * Thread groups
2915  */
2916 static const struct file_operations proc_task_operations;
2917 static const struct inode_operations proc_task_inode_operations;
2918
/*
 * Contents of /proc/<tgid>/: one pid_entry per file, giving its name,
 * mode, and the ops (or single-show callback) that implement it.
 * Enumerated by proc_tgid_base_readdir() and resolved by
 * proc_tgid_base_lookup().
 */
static const struct pid_entry tgid_base_stuff[] = {
	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",         S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",    S_IRUSR, proc_environ_operations),
	REG("auxv",       S_IRUSR, proc_auxv_operations),
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",     S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
	REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	ONE("syscall",    S_IRUSR, proc_pid_syscall),
#endif
	REG("cmdline",    S_IRUGO, proc_pid_cmdline_ops),
	ONE("stat",       S_IRUGO, proc_tgid_stat),
	ONE("statm",      S_IRUGO, proc_pid_statm),
	REG("maps",       S_IRUGO, proc_pid_maps_operations),
#ifdef CONFIG_NUMA
	REG("numa_maps",  S_IRUGO, proc_pid_numa_maps_operations),
#endif
	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",        proc_cwd_link),
	LNK("root",       proc_root_link),
	LNK("exe",        proc_exe_link),
	REG("mounts",     S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
	REG("mountstats", S_IRUSR, proc_mountstats_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
	REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	ONE("wchan",      S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHED_INFO
	ONE("schedstat",  S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	ONE("cpuset",     S_IRUGO, proc_cpuset_show),
#endif
#ifdef CONFIG_CGROUPS
	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
#endif
	ONE("oom_score",  S_IRUGO, proc_oom_score),
	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
	REG("fail-nth", 0644, proc_fail_nth_operations),
#endif
#ifdef CONFIG_ELF_CORE
	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	ONE("io",       S_IRUSR, proc_tgid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
	REG("timers",     S_IRUGO, proc_timers_operations),
#endif
	REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
#ifdef CONFIG_LIVEPATCH
	ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
#endif
};
3018
/* readdir for /proc/<tgid>/: enumerate the tgid_base_stuff table. */
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
3024
/* File operations for the /proc/<tgid>/ directory itself. */
static const struct file_operations proc_tgid_base_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_tgid_base_readdir,
	.llseek		= generic_file_llseek,
};
3030
/* lookup for /proc/<tgid>/: resolve a name against tgid_base_stuff. */
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
3036
/* Inode operations for the /proc/<tgid>/ directory. */
static const struct inode_operations proc_tgid_base_inode_operations = {
	.lookup		= proc_tgid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};
3043
/*
 * Flush the cached dentries for /proc/<pid> and /proc/<tgid>/task/<pid>
 * on one proc mount.  Best effort: a missing dentry at any step simply
 * means there is nothing cached to invalidate.
 */
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
{
	struct dentry *dentry, *leader, *dir;
	char buf[10 + 1];	/* longest 32-bit pid in decimal, plus NUL */
	struct qstr name;

	/* Flush /proc/<pid>. */
	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%u", pid);
	/* no ->d_hash() rejects on procfs */
	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
	if (dentry) {
		d_invalidate(dentry);
		dput(dentry);
	}

	/* Only non-leader threads have a separate /proc/<tgid>/task/<pid>
	 * path to flush; for the leader the invalidation above suffices. */
	if (pid == tgid)
		return;

	/* Walk down /proc/<tgid>/task/<pid>, giving up at the first
	 * component that is not in the dcache. */
	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%u", tgid);
	leader = d_hash_and_lookup(mnt->mnt_root, &name);
	if (!leader)
		goto out;

	name.name = "task";
	name.len = strlen(name.name);
	dir = d_hash_and_lookup(leader, &name);
	if (!dir)
		goto out_put_leader;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%u", pid);
	dentry = d_hash_and_lookup(dir, &name);
	if (dentry) {
		d_invalidate(dentry);
		dput(dentry);
	}

	dput(dir);
out_put_leader:
	dput(leader);
out:
	return;
}
3088
3089 /**
3090  * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
3091  * @task: task that should be flushed.
3092  *
3093  * When flushing dentries from proc, one needs to flush them from global
3094  * proc (proc_mnt) and from all the namespaces' procs this task was seen
3095  * in. This call is supposed to do all of this job.
3096  *
3097  * Looks in the dcache for
3098  * /proc/@pid
3099  * /proc/@tgid/task/@pid
 * if either directory is present flushes it and all of its children
3101  * from the dcache.
3102  *
3103  * It is safe and reasonable to cache /proc entries for a task until
3104  * that task exits.  After that they just clog up the dcache with
3105  * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
3107  * dcache entries at process exit time.
3108  *
3109  * NOTE: This routine is just an optimization so it does not guarantee
3110  *       that no dcache entries will exist at process exit time it
3111  *       just makes it very unlikely that any will persist.
3112  */
3113
3114 void proc_flush_task(struct task_struct *task)
3115 {
3116         int i;
3117         struct pid *pid, *tgid;
3118         struct upid *upid;
3119
3120         pid = task_pid(task);
3121         tgid = task_tgid(task);
3122
3123         for (i = 0; i <= pid->level; i++) {
3124                 upid = &pid->numbers[i];
3125                 proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
3126                                         tgid->numbers[i].nr);
3127         }
3128 }
3129
3130 static int proc_pid_instantiate(struct inode *dir,
3131                                    struct dentry * dentry,
3132                                    struct task_struct *task, const void *ptr)
3133 {
3134         struct inode *inode;
3135
3136         inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
3137         if (!inode)
3138                 goto out;
3139
3140         inode->i_op = &proc_tgid_base_inode_operations;
3141         inode->i_fop = &proc_tgid_base_operations;
3142         inode->i_flags|=S_IMMUTABLE;
3143
3144         set_nlink(inode, nlink_tgid);
3145
3146         d_set_d_op(dentry, &pid_dentry_operations);
3147
3148         d_add(dentry, inode);
3149         /* Close the race of the process dying before we return the dentry */
3150         if (pid_revalidate(dentry, 0))
3151                 return 0;
3152 out:
3153         return -ENOENT;
3154 }
3155
3156 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
3157 {
3158         int result = -ENOENT;
3159         struct task_struct *task;
3160         unsigned tgid;
3161         struct pid_namespace *ns;
3162
3163         tgid = name_to_int(&dentry->d_name);
3164         if (tgid == ~0U)
3165                 goto out;
3166
3167         ns = dentry->d_sb->s_fs_info;
3168         rcu_read_lock();
3169         task = find_task_by_pid_ns(tgid, ns);
3170         if (task)
3171                 get_task_struct(task);
3172         rcu_read_unlock();
3173         if (!task)
3174                 goto out;
3175
3176         result = proc_pid_instantiate(dir, dentry, task, NULL);
3177         put_task_struct(task);
3178 out:
3179         return ERR_PTR(result);
3180 }
3181
3182 /*
3183  * Find the first task with tgid >= tgid
3184  *
3185  */
/* Cursor used by proc_pid_readdir() to walk thread groups in tgid order. */
struct tgid_iter {
	unsigned int tgid;		/* lowest tgid to consider next */
	struct task_struct *task;	/* current task (ref held) or NULL */
};
/*
 * Advance @iter to the first thread group with tgid >= iter.tgid.
 * Drops the reference held on the previous iter.task (if any); on return
 * iter.task carries a new reference, or is NULL when no thread group
 * remains.
 */
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is if the pid we have found is the
		 * pid of a thread_group_leader.  Testing for task
		 * being a thread_group_leader is the obvious thing
		 * todo but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straight forward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * As we don't care in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}
3224
/* First readdir position used for pid entries; the "+ 2" reserves the
 * two slots used for the "self" and "thread-self" links emitted below. */
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2)
3226
3227 /* for the /proc/ directory itself, after non-process stuff has been done */
/*
 * Emit the per-process entries of /proc/: the "self" and "thread-self"
 * links followed by one directory per thread-group leader.  ctx->pos
 * encodes the tgid (offset by TGID_OFFSET) so a partial read resumes at
 * the next thread group.
 */
int proc_pid_readdir(struct file *file, struct dir_context *ctx)
{
	struct tgid_iter iter;
	struct pid_namespace *ns = file_inode(file)->i_sb->s_fs_info;
	loff_t pos = ctx->pos;

	if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
		return 0;

	/* The two positions before the pids hold "self" and "thread-self". */
	if (pos == TGID_OFFSET - 2) {
		struct inode *inode = d_inode(ns->proc_self);
		if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	if (pos == TGID_OFFSET - 1) {
		struct inode *inode = d_inode(ns->proc_thread_self);
		if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	iter.tgid = pos - TGID_OFFSET;
	iter.task = NULL;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		char name[10 + 1];
		int len;

		cond_resched();
		/* Skip tasks hidden by the hidepid= mount option. */
		if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
			continue;

		len = snprintf(name, sizeof(name), "%u", iter.tgid);
		ctx->pos = iter.tgid + TGID_OFFSET;
		if (!proc_fill_cache(file, ctx, name, len,
				     proc_pid_instantiate, iter.task, NULL)) {
			/* User buffer full: drop our reference and let the
			 * next call resume from ctx->pos. */
			put_task_struct(iter.task);
			return 0;
		}
	}
	ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
	return 0;
}
3272
3273 /*
3274  * proc_tid_comm_permission is a special permission function exclusively
3275  * used for the node /proc/<pid>/task/<tid>/comm.
3276  * It bypasses generic permission checks in the case where a task of the same
3277  * task group attempts to access the node.
3278  * The rationale behind this is that glibc and bionic access this node for
3279  * cross thread naming (pthread_set/getname_np(!self)). However, if
3280  * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
3281  * which locks out the cross thread naming implementation.
3282  * This function makes sure that the node is always accessible for members of
3283  * same thread group.
3284  */
3285 static int proc_tid_comm_permission(struct inode *inode, int mask)
3286 {
3287         bool is_same_tgroup;
3288         struct task_struct *task;
3289
3290         task = get_proc_task(inode);
3291         if (!task)
3292                 return -ESRCH;
3293         is_same_tgroup = same_thread_group(current, task);
3294         put_task_struct(task);
3295
3296         if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
3297                 /* This file (/proc/<pid>/task/<tid>/comm) can always be
3298                  * read or written by the members of the corresponding
3299                  * thread group.
3300                  */
3301                 return 0;
3302         }
3303
3304         return generic_permission(inode, mask);
3305 }
3306
/* Inode operations for /proc/<pid>/task/<tid>/comm: only ->permission is
 * overridden, to allow thread-group-internal access (see above). */
static const struct inode_operations proc_tid_comm_inode_operations = {
		.permission = proc_tid_comm_permission,
};
3310
3311 /*
3312  * Tasks
3313  */
/*
 * Contents of /proc/<tgid>/task/<tid>/: per-thread counterpart of
 * tgid_base_stuff.  Enumerated by proc_tid_base_readdir() and resolved
 * by proc_tid_base_lookup().
 */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",        S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",   S_IRUSR, proc_environ_operations),
	REG("auxv",      S_IRUSR, proc_auxv_operations),
	ONE("status",    S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",    S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
			 &proc_tid_comm_inode_operations,
			 &proc_pid_set_comm_operations, {}),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	ONE("syscall",   S_IRUSR, proc_pid_syscall),
#endif
	REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
	ONE("stat",      S_IRUGO, proc_tid_stat),
	ONE("statm",     S_IRUGO, proc_pid_statm),
	REG("maps",      S_IRUGO, proc_tid_maps_operations),
#ifdef CONFIG_PROC_CHILDREN
	REG("children",  S_IRUGO, proc_tid_children_operations),
#endif
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",       proc_cwd_link),
	LNK("root",      proc_root_link),
	LNK("exe",       proc_exe_link),
	REG("mounts",    S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",     S_IRUGO, proc_tid_smaps_operations),
	REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	ONE("wchan",     S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHED_INFO
	ONE("schedstat", S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	ONE("cpuset",    S_IRUGO, proc_cpuset_show),
#endif
#ifdef CONFIG_CGROUPS
	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
#endif
	ONE("oom_score", S_IRUGO, proc_oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
	REG("fail-nth", 0644, proc_fail_nth_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	ONE("io",       S_IRUSR, proc_tid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
#ifdef CONFIG_LIVEPATCH
	ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
#endif
};
3405
/* readdir for /proc/<tgid>/task/<tid>/: enumerate tid_base_stuff. */
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}
3411
/* lookup for /proc/<tgid>/task/<tid>/: resolve against tid_base_stuff. */
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}
3417
/* File operations for the /proc/<tgid>/task/<tid>/ directory. */
static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_tid_base_readdir,
	.llseek		= generic_file_llseek,
};
3423
/* Inode operations for the /proc/<tgid>/task/<tid>/ directory. */
static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
3429
3430 static int proc_task_instantiate(struct inode *dir,
3431         struct dentry *dentry, struct task_struct *task, const void *ptr)
3432 {
3433         struct inode *inode;
3434         inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
3435
3436         if (!inode)
3437                 goto out;
3438         inode->i_op = &proc_tid_base_inode_operations;
3439         inode->i_fop = &proc_tid_base_operations;
3440         inode->i_flags|=S_IMMUTABLE;
3441
3442         set_nlink(inode, nlink_tid);
3443
3444         d_set_d_op(dentry, &pid_dentry_operations);
3445
3446         d_add(dentry, inode);
3447         /* Close the race of the process dying before we return the dentry */
3448         if (pid_revalidate(dentry, 0))
3449                 return 0;
3450 out:
3451         return -ENOENT;
3452 }
3453
/*
 * Look up /proc/<tgid>/task/<name> where <name> should be a decimal tid.
 * The tid must belong to the same thread group as the directory's task.
 */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	int result = -ENOENT;
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(&dentry->d_name);
	if (tid == ~0U)		/* not a valid pid string */
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	/* Refuse tids that belong to a different thread group. */
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return ERR_PTR(result);
}
3488
3489 /*
3490  * Find the first tid of a thread group to return to user space.
3491  *
3492  * Usually this is just the thread group leader, but if the users
3493  * buffer was too small or there was a seek into the middle of the
3494  * directory we have more work todo.
3495  *
3496  * In the case of a short read we start with find_task_by_pid.
3497  *
3498  * In the case of a seek we start with the leader and walk nr
3499  * threads past it.
3500  */
/* See the block comment above for the search strategy.  Returns a task
 * with a reference held (caller must put it), or NULL. */
static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
					struct pid_namespace *ns)
{
	struct task_struct *pos, *task;
	unsigned long nr = f_pos;

	if (nr != f_pos)	/* 32bit overflow? */
		return NULL;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	if (!task)
		goto fail;

	/* Attempt to start with the tid of a thread */
	if (tid && nr) {
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && same_thread_group(pos, task))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing todo */
	if (nr >= get_nr_threads(task))
		goto fail;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	pos = task = task->group_leader;
	do {
		if (!nr--)
			goto found;
	} while_each_thread(task, pos);
fail:
	pos = NULL;
	goto out;
found:
	get_task_struct(pos);	/* reference returned to the caller */
out:
	rcu_read_unlock();
	return pos;
}
3543
3544 /*
3545  * Find the next thread in the thread list.
3546  * Return NULL if there is an error or no next thread.
3547  *
3548  * The reference to the input task_struct is released.
3549  */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;
	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		/* Wrapped back to the group leader: iteration is done. */
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);		/* release the caller's reference */
	return pos;
}
3565
3566 /* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct task_struct *task;
	struct pid_namespace *ns;
	int tid;

	if (proc_inode_is_dead(inode))
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		return 0;

	/* f_version caches the tgid value that the last readdir call couldn't
	 * return. lseek aka telldir automagically resets f_version to 0.
	 */
	ns = inode->i_sb->s_fs_info;
	tid = (int)file->f_version;
	file->f_version = 0;
	/* ctx->pos - 2 accounts for the "." and ".." entries emitted above. */
	for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
	     task;
	     task = next_tid(task), ctx->pos++) {
		char name[10 + 1];
		int len;
		tid = task_pid_nr_ns(task, ns);
		len = snprintf(name, sizeof(name), "%u", tid);
		if (!proc_fill_cache(file, ctx, name, len,
				proc_task_instantiate, task, NULL)) {
			/* returning this tgid failed, save it as the first
			 * pid for the next readdir call */
			file->f_version = (u64)tid;
			put_task_struct(task);
			break;
		}
	}

	return 0;
}
3605
3606 static int proc_task_getattr(const struct path *path, struct kstat *stat,
3607                              u32 request_mask, unsigned int query_flags)
3608 {
3609         struct inode *inode = d_inode(path->dentry);
3610         struct task_struct *p = get_proc_task(inode);
3611         generic_fillattr(inode, stat);
3612
3613         if (p) {
3614                 stat->nlink += get_nr_threads(p);
3615                 put_task_struct(p);
3616         }
3617
3618         return 0;
3619 }
3620
/* Inode operations for the /proc/TGID/task directory itself. */
static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};
3627
/* File operations for the /proc/TGID/task directory (readdir via
 * proc_task_readdir; iterate_shared means it may run without the
 * exclusive inode lock). */
static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_task_readdir,
	.llseek		= generic_file_llseek,
};
3633
3634 void __init set_proc_pid_nlink(void)
3635 {
3636         nlink_tid = pid_entry_nlink(tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
3637         nlink_tgid = pid_entry_nlink(tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
3638 }