tracing: Cache comms only after an event occurred
[tomoyo/tomoyo-test1.git] / kernel / trace / trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 William Lee Irwin III
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41
42 #include "trace.h"
43 #include "trace_output.h"
44
45 /*
46  * On boot up, the ring buffer is set to the minimum size, so that
47  * we do not waste memory on systems that are not using tracing.
48  */
49 int ring_buffer_expanded;
50
51 /*
52  * We need to change this state when a selftest is running.
54  * A selftest will poke into the ring buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring buffer, such as trace_printk, could occur
57  * at the same time, giving false positive or negative results.
57  */
58 static bool __read_mostly tracing_selftest_running;
59
60 /*
61  * If a tracer is running, we do not want to run SELFTEST.
62  */
63 bool __read_mostly tracing_selftest_disabled;
64
65 /* For tracers that don't implement custom flags */
66 static struct tracer_opt dummy_tracer_opt[] = {
67         { }
68 };
69
70 static struct tracer_flags dummy_tracer_flags = {
71         .val = 0,
72         .opts = dummy_tracer_opt
73 };
74
75 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
76 {
77         return 0;
78 }
79
80 /*
81  * To prevent the comm cache from being overwritten when no
82  * tracing is active, only save the comm when a trace event
83  * occurred.
84  */
85 static DEFINE_PER_CPU(bool, trace_cmdline_save);
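/*
 * The flag is set in __buffer_unlock_commit() each time an event is
 * committed, and consumed (cleared again) by tracing_record_cmdline()
 * further below, so at most one comm is saved per recorded event.
 */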
86
87 /*
88  * Kill all tracing for good (never come back).
89  * It is initialized to 1 but will turn to zero if the initialization
90  * of the tracer is successful. But that is the only place that sets
91  * this back to zero.
92  */
93 static int tracing_disabled = 1;
94
95 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
96
97 cpumask_var_t __read_mostly     tracing_buffer_mask;
98
99 /*
100  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
101  *
102  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
103  * is set, then ftrace_dump is called. This will output the contents
104  * of the ftrace buffers to the console.  This is very useful for
105  * capturing traces that lead to crashes and outputting them to a
106  * serial console.
107  *
108  * It is off by default; you can enable it either by specifying
109  * "ftrace_dump_on_oops" on the kernel command line or by setting
110  * /proc/sys/kernel/ftrace_dump_on_oops.
111  * Set it to 1 to dump the buffers of all CPUs.
112  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
113  */
114
115 enum ftrace_dump_mode ftrace_dump_on_oops;
116
117 static int tracing_set_tracer(const char *buf);
118
119 #define MAX_TRACER_SIZE         100
120 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
121 static char *default_bootup_tracer;
122
123 static int __init set_cmdline_ftrace(char *str)
124 {
125         strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
126         default_bootup_tracer = bootup_tracer_buf;
127         /* We are using ftrace early, expand it */
128         ring_buffer_expanded = 1;
129         return 1;
130 }
131 __setup("ftrace=", set_cmdline_ftrace);
132
133 static int __init set_ftrace_dump_on_oops(char *str)
134 {
135         if (*str++ != '=' || !*str) {
136                 ftrace_dump_on_oops = DUMP_ALL;
137                 return 1;
138         }
139
140         if (!strcmp("orig_cpu", str)) {
141                 ftrace_dump_on_oops = DUMP_ORIG;
142                 return 1;
143         }
144
145         return 0;
146 }
147 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
148
149 unsigned long long ns2usecs(cycle_t nsec)
150 {
151         nsec += 500;
152         do_div(nsec, 1000);
153         return nsec;
154 }
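/*
 * For illustration: ns2usecs() rounds to the nearest microsecond, so
 * ns2usecs(1500) returns 2 and ns2usecs(1499) returns 1, because 500 is
 * added before the divide by 1000.
 */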
155
156 /*
157  * The global_trace is the descriptor that holds the tracing
158  * buffers for the live tracing. For each CPU, it contains
159  * a linked list of pages that will store trace entries. The
160  * page descriptor of the pages in memory is used to hold
161  * the linked list by linking the lru item in the page descriptor
162  * to each of the pages in the buffer per CPU.
163  *
164  * For each active CPU there is a data field that holds the
165  * pages for the buffer for that CPU. Each CPU has the same number
166  * of pages allocated for its buffer.
167  */
168 static struct trace_array       global_trace;
169
170 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
171
172 int filter_current_check_discard(struct ring_buffer *buffer,
173                                  struct ftrace_event_call *call, void *rec,
174                                  struct ring_buffer_event *event)
175 {
176         return filter_check_discard(call, rec, buffer, event);
177 }
178 EXPORT_SYMBOL_GPL(filter_current_check_discard);
179
180 cycle_t ftrace_now(int cpu)
181 {
182         u64 ts;
183
184         /* Early boot up does not have a buffer yet */
185         if (!global_trace.buffer)
186                 return trace_clock_local();
187
188         ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
189         ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
190
191         return ts;
192 }
193
194 /*
195  * The max_tr is used to snapshot the global_trace when a maximum
196  * latency is reached. Some tracers will use this to store a maximum
197  * trace while it continues examining live traces.
198  *
199  * The buffers for the max_tr are set up the same as the global_trace.
200  * When a snapshot is taken, the linked list of the max_tr is swapped
201  * with the linked list of the global_trace and the buffers are reset for
202  * the global_trace so the tracing can continue.
203  */
204 static struct trace_array       max_tr;
205
206 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
207
208 /* tracer_enabled is used to toggle activation of a tracer */
209 static int                      tracer_enabled = 1;
210
211 /**
212  * tracing_is_enabled - return tracer_enabled status
213  *
214  * This function is used by other tracers to know the status
215  * of the tracer_enabled flag.  Tracers may use this function
216  * to know if they should enable their features when starting
217  * up. See irqsoff tracer for an example (start_irqsoff_tracer).
218  */
219 int tracing_is_enabled(void)
220 {
221         return tracer_enabled;
222 }
223
224 /*
225  * trace_buf_size is the size in bytes that is allocated
226  * for a buffer. Note, the number of bytes is always rounded
227  * to page size.
228  *
229  * This number is purposely set to a low value of 16384 so that,
230  * if a dump on oops happens, we do not have to wait for a huge
231  * amount of output. It is configurable at both boot time and
232  * run time anyway.
233  */
234 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
235
236 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
237
238 /* trace_types holds a link list of available tracers. */
239 static struct tracer            *trace_types __read_mostly;
240
241 /* current_trace points to the tracer that is currently active */
242 static struct tracer            *current_trace __read_mostly;
243
244 /*
245  * trace_types_lock is used to protect the trace_types list.
246  */
247 static DEFINE_MUTEX(trace_types_lock);
248
249 /*
250  * Serialize access to the ring buffer.
251  *
252  * The ring buffer serializes readers, but that is only low level protection.
253  * The validity of the events (returned by ring_buffer_peek() etc.)
254  * is not protected by the ring buffer.
255  *
256  * The content of events may become garbage if we allow other processes to
257  * consume these events concurrently:
258  *   A) the page holding the consumed events may become a normal page
259  *      (not a reader page) in the ring buffer, and this page will be
260  *      rewritten by the event producer.
261  *   B) the page holding the consumed events may become a page for
262  *      splice_read, and this page will be returned to the system.
263  *
264  * These primitives allow multiple processes to access different per-cpu
265  * ring buffers concurrently.
266  *
267  * These primitives don't distinguish read-only and read-consume access.
268  * Multiple read-only accesses are also serialized.
269  */
270
271 #ifdef CONFIG_SMP
272 static DECLARE_RWSEM(all_cpu_access_lock);
273 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
274
275 static inline void trace_access_lock(int cpu)
276 {
277         if (cpu == TRACE_PIPE_ALL_CPU) {
278                 /* gain it for accessing the whole ring buffer. */
279                 down_write(&all_cpu_access_lock);
280         } else {
281                 /* gain it for accessing a cpu ring buffer. */
282
283                 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
284                 down_read(&all_cpu_access_lock);
285
286                 /* Secondly block other access to this @cpu ring buffer. */
287                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
288         }
289 }
290
291 static inline void trace_access_unlock(int cpu)
292 {
293         if (cpu == TRACE_PIPE_ALL_CPU) {
294                 up_write(&all_cpu_access_lock);
295         } else {
296                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
297                 up_read(&all_cpu_access_lock);
298         }
299 }
300
301 static inline void trace_access_lock_init(void)
302 {
303         int cpu;
304
305         for_each_possible_cpu(cpu)
306                 mutex_init(&per_cpu(cpu_access_lock, cpu));
307 }
308
309 #else
310
311 static DEFINE_MUTEX(access_lock);
312
313 static inline void trace_access_lock(int cpu)
314 {
315         (void)cpu;
316         mutex_lock(&access_lock);
317 }
318
319 static inline void trace_access_unlock(int cpu)
320 {
321         (void)cpu;
322         mutex_unlock(&access_lock);
323 }
324
325 static inline void trace_access_lock_init(void)
326 {
327 }
328
329 #endif
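/*
 * Illustrative usage pattern (a sketch, not code from this file): a
 * consuming reader of a single cpu buffer brackets its accesses with
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 *
 * while a reader of all cpus passes TRACE_PIPE_ALL_CPU, which takes the
 * write side of all_cpu_access_lock on SMP.
 */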
330
331 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
332 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
333
334 /* trace_flags holds trace_options default values */
335 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
336         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
337         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
338         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
339
340 static int trace_stop_count;
341 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
342
343 static void wakeup_work_handler(struct work_struct *work)
344 {
345         wake_up(&trace_wait);
346 }
347
348 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
349
350 /**
351  * tracing_on - enable tracing buffers
352  *
353  * This function enables tracing buffers that may have been
354  * disabled with tracing_off.
355  */
356 void tracing_on(void)
357 {
358         if (global_trace.buffer)
359                 ring_buffer_record_on(global_trace.buffer);
360         /*
361          * This flag is only looked at when buffers haven't been
362          * allocated yet. We don't really care about the race
363          * between setting this flag and actually turning
364          * on the buffer.
365          */
366         global_trace.buffer_disabled = 0;
367 }
368 EXPORT_SYMBOL_GPL(tracing_on);
369
370 /**
371  * tracing_off - turn off tracing buffers
372  *
373  * This function stops the tracing buffers from recording data.
374  * It does not disable any overhead the tracers themselves may
375  * be causing. This function simply causes all recording to
376  * the ring buffers to fail.
377  */
378 void tracing_off(void)
379 {
380         if (global_trace.buffer)
381                 ring_buffer_record_off(global_trace.buffer);
382         /*
383          * This flag is only looked at when buffers haven't been
384          * allocated yet. We don't really care about the race
385          * between setting this flag and actually turning
386          * on the buffer.
387          */
388         global_trace.buffer_disabled = 1;
389 }
390 EXPORT_SYMBOL_GPL(tracing_off);
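/*
 * Example (illustrative): a debugging site can freeze the trace the
 * moment a suspect condition is seen, so the events leading up to it
 * are preserved in the ring buffer:
 *
 *	if (WARN_ON_ONCE(bad_condition))
 *		tracing_off();
 *
 * Here "bad_condition" is a placeholder for whatever is being debugged.
 */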
391
392 /**
393  * tracing_is_on - show state of ring buffers enabled
394  */
395 int tracing_is_on(void)
396 {
397         if (global_trace.buffer)
398                 return ring_buffer_record_is_on(global_trace.buffer);
399         return !global_trace.buffer_disabled;
400 }
401 EXPORT_SYMBOL_GPL(tracing_is_on);
402
403 /**
404  * trace_wake_up - wake up tasks waiting for trace input
405  *
406  * Schedules a delayed work to wake up any task that is blocked on the
407  * trace_wait queue. This is used with trace_poll for tasks polling the
408  * trace.
409  */
410 void trace_wake_up(void)
411 {
412         const unsigned long delay = msecs_to_jiffies(2);
413
414         if (trace_flags & TRACE_ITER_BLOCK)
415                 return;
416         schedule_delayed_work(&wakeup_work, delay);
417 }
418
419 static int __init set_buf_size(char *str)
420 {
421         unsigned long buf_size;
422
423         if (!str)
424                 return 0;
425         buf_size = memparse(str, &str);
426         /* nr_entries can not be zero */
427         if (buf_size == 0)
428                 return 0;
429         trace_buf_size = buf_size;
430         return 1;
431 }
432 __setup("trace_buf_size=", set_buf_size);
433
434 static int __init set_tracing_thresh(char *str)
435 {
436         unsigned long threshold;
437         int ret;
438
439         if (!str)
440                 return 0;
441         ret = kstrtoul(str, 0, &threshold);
442         if (ret < 0)
443                 return 0;
444         tracing_thresh = threshold * 1000;
445         return 1;
446 }
447 __setup("tracing_thresh=", set_tracing_thresh);
448
449 unsigned long nsecs_to_usecs(unsigned long nsecs)
450 {
451         return nsecs / 1000;
452 }
453
454 /* These must match the bit positions in trace_iterator_flags */
455 static const char *trace_options[] = {
456         "print-parent",
457         "sym-offset",
458         "sym-addr",
459         "verbose",
460         "raw",
461         "hex",
462         "bin",
463         "block",
464         "stacktrace",
465         "trace_printk",
466         "ftrace_preempt",
467         "branch",
468         "annotate",
469         "userstacktrace",
470         "sym-userobj",
471         "printk-msg-only",
472         "context-info",
473         "latency-format",
474         "sleep-time",
475         "graph-time",
476         "record-cmd",
477         "overwrite",
478         "disable_on_free",
479         "irq-info",
480         "markers",
481         NULL
482 };
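/*
 * Example (illustrative): each name above toggles the matching
 * TRACE_ITER_* bit, typically from user space via the trace_options
 * file, e.g.
 *
 *	echo stacktrace > /sys/kernel/debug/tracing/trace_options
 *	echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 */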
483
484 static struct {
485         u64 (*func)(void);
486         const char *name;
487 } trace_clocks[] = {
488         { trace_clock_local,    "local" },
489         { trace_clock_global,   "global" },
490         { trace_clock_counter,  "counter" },
491 };
492
493 int trace_clock_id;
494
495 /*
496  * trace_parser_get_init - gets the buffer for trace parser
497  */
498 int trace_parser_get_init(struct trace_parser *parser, int size)
499 {
500         memset(parser, 0, sizeof(*parser));
501
502         parser->buffer = kmalloc(size, GFP_KERNEL);
503         if (!parser->buffer)
504                 return 1;
505
506         parser->size = size;
507         return 0;
508 }
509
510 /*
511  * trace_parser_put - frees the buffer for trace parser
512  */
513 void trace_parser_put(struct trace_parser *parser)
514 {
515         kfree(parser->buffer);
516 }
517
518 /*
519  * trace_get_user - reads the user input string separated by space
520  * (matched by isspace(ch))
521  *
522  * For each string found the 'struct trace_parser' is updated,
523  * and the function returns.
524  *
525  * Returns number of bytes read.
526  *
527  * See kernel/trace/trace.h for 'struct trace_parser' details.
528  */
529 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
530         size_t cnt, loff_t *ppos)
531 {
532         char ch;
533         size_t read = 0;
534         ssize_t ret;
535
536         if (!*ppos)
537                 trace_parser_clear(parser);
538
539         ret = get_user(ch, ubuf++);
540         if (ret)
541                 goto out;
542
543         read++;
544         cnt--;
545
546         /*
547          * If the parser did not finish with the last write,
548          * continue reading the user input without skipping spaces.
549          */
550         if (!parser->cont) {
551                 /* skip white space */
552                 while (cnt && isspace(ch)) {
553                         ret = get_user(ch, ubuf++);
554                         if (ret)
555                                 goto out;
556                         read++;
557                         cnt--;
558                 }
559
560                 /* only spaces were written */
561                 if (isspace(ch)) {
562                         *ppos += read;
563                         ret = read;
564                         goto out;
565                 }
566
567                 parser->idx = 0;
568         }
569
570         /* read the non-space input */
571         while (cnt && !isspace(ch)) {
572                 if (parser->idx < parser->size - 1)
573                         parser->buffer[parser->idx++] = ch;
574                 else {
575                         ret = -EINVAL;
576                         goto out;
577                 }
578                 ret = get_user(ch, ubuf++);
579                 if (ret)
580                         goto out;
581                 read++;
582                 cnt--;
583         }
584
585         /* We either got finished input or we have to wait for another call. */
586         if (isspace(ch)) {
587                 parser->buffer[parser->idx] = 0;
588                 parser->cont = false;
589         } else {
590                 parser->cont = true;
591                 parser->buffer[parser->idx++] = ch;
592         }
593
594         *ppos += read;
595         ret = read;
596
597 out:
598         return ret;
599 }
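/*
 * Typical caller pattern (a sketch using only the helpers in this file):
 * a write() handler that accepts space-separated tokens allocates the
 * parser once, then calls trace_get_user() per write, e.g.
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, SOME_MAX_LEN))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && !parser.cont)
 *		... handle the token in parser.buffer ...
 *	trace_parser_put(&parser);
 *
 * SOME_MAX_LEN is a placeholder for the caller's maximum token length.
 */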
600
601 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
602 {
603         int len;
604         int ret;
605
606         if (!cnt)
607                 return 0;
608
609         if (s->len <= s->readpos)
610                 return -EBUSY;
611
612         len = s->len - s->readpos;
613         if (cnt > len)
614                 cnt = len;
615         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
616         if (ret == cnt)
617                 return -EFAULT;
618
619         cnt -= ret;
620
621         s->readpos += cnt;
622         return cnt;
623 }
624
625 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
626 {
627         int len;
628
629         if (s->len <= s->readpos)
630                 return -EBUSY;
631
632         len = s->len - s->readpos;
633         if (cnt > len)
634                 cnt = len;
635         memcpy(buf, s->buffer + s->readpos, cnt);
636
637         s->readpos += cnt;
638         return cnt;
639 }
640
641 /*
642  * ftrace_max_lock is used to protect the swapping of buffers
643  * when taking a max snapshot. The buffers themselves are
644  * protected by per_cpu spinlocks. But the action of the swap
645  * needs its own lock.
646  *
647  * This is defined as an arch_spinlock_t in order to help
648  * with performance when lockdep debugging is enabled.
649  *
650  * It is also used in other places outside of update_max_tr,
651  * so it needs to be defined outside of
652  * CONFIG_TRACER_MAX_TRACE.
653  */
654 static arch_spinlock_t ftrace_max_lock =
655         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
656
657 unsigned long __read_mostly     tracing_thresh;
658
659 #ifdef CONFIG_TRACER_MAX_TRACE
660 unsigned long __read_mostly     tracing_max_latency;
661
662 /*
663  * Copy the new maximum trace into the separate maximum-trace
664  * structure. (this way the maximum trace is permanently saved,
665  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
666  */
667 static void
668 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
669 {
670         struct trace_array_cpu *data = tr->data[cpu];
671         struct trace_array_cpu *max_data;
672
673         max_tr.cpu = cpu;
674         max_tr.time_start = data->preempt_timestamp;
675
676         max_data = max_tr.data[cpu];
677         max_data->saved_latency = tracing_max_latency;
678         max_data->critical_start = data->critical_start;
679         max_data->critical_end = data->critical_end;
680
681         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
682         max_data->pid = tsk->pid;
683         max_data->uid = task_uid(tsk);
684         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
685         max_data->policy = tsk->policy;
686         max_data->rt_priority = tsk->rt_priority;
687
688         /* record this task's comm */
689         tracing_record_cmdline(tsk);
690 }
691
692 /**
693  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
694  * @tr: tracer
695  * @tsk: the task with the latency
696  * @cpu: The cpu that initiated the trace.
697  *
698  * Flip the buffers between the @tr and the max_tr and record information
699  * about which task was the cause of this latency.
700  */
701 void
702 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
703 {
704         struct ring_buffer *buf = tr->buffer;
705
706         if (trace_stop_count)
707                 return;
708
709         WARN_ON_ONCE(!irqs_disabled());
710         if (!current_trace->use_max_tr) {
711                 WARN_ON_ONCE(1);
712                 return;
713         }
714         arch_spin_lock(&ftrace_max_lock);
715
716         tr->buffer = max_tr.buffer;
717         max_tr.buffer = buf;
718
719         __update_max_tr(tr, tsk, cpu);
720         arch_spin_unlock(&ftrace_max_lock);
721 }
722
723 /**
724  * update_max_tr_single - only copy one trace over, and reset the rest
725  * @tr: tracer
726  * @tsk: task with the latency
727  * @cpu: the cpu of the buffer to copy.
728  *
729  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
730  */
731 void
732 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
733 {
734         int ret;
735
736         if (trace_stop_count)
737                 return;
738
739         WARN_ON_ONCE(!irqs_disabled());
740         if (!current_trace->use_max_tr) {
741                 WARN_ON_ONCE(1);
742                 return;
743         }
744
745         arch_spin_lock(&ftrace_max_lock);
746
747         ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
748
749         if (ret == -EBUSY) {
750                 /*
751                  * We failed to swap the buffer due to a commit taking
752                  * place on this CPU. We fail to record, but we reset
753                  * the max trace buffer (no one writes directly to it)
754                  * and flag that it failed.
755                  */
756                 trace_array_printk(&max_tr, _THIS_IP_,
757                         "Failed to swap buffers due to commit in progress\n");
758         }
759
760         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
761
762         __update_max_tr(tr, tsk, cpu);
763         arch_spin_unlock(&ftrace_max_lock);
764 }
765 #endif /* CONFIG_TRACER_MAX_TRACE */
766
767 /**
768  * register_tracer - register a tracer with the ftrace system.
769  * @type: the plugin for the tracer
770  *
771  * Register a new plugin tracer.
772  */
773 int register_tracer(struct tracer *type)
774 {
775         struct tracer *t;
776         int ret = 0;
777
778         if (!type->name) {
779                 pr_info("Tracer must have a name\n");
780                 return -1;
781         }
782
783         if (strlen(type->name) >= MAX_TRACER_SIZE) {
784                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
785                 return -1;
786         }
787
788         mutex_lock(&trace_types_lock);
789
790         tracing_selftest_running = true;
791
792         for (t = trace_types; t; t = t->next) {
793                 if (strcmp(type->name, t->name) == 0) {
794                         /* already found */
795                         pr_info("Tracer %s already registered\n",
796                                 type->name);
797                         ret = -1;
798                         goto out;
799                 }
800         }
801
802         if (!type->set_flag)
803                 type->set_flag = &dummy_set_flag;
804         if (!type->flags)
805                 type->flags = &dummy_tracer_flags;
806         else
807                 if (!type->flags->opts)
808                         type->flags->opts = dummy_tracer_opt;
809         if (!type->wait_pipe)
810                 type->wait_pipe = default_wait_pipe;
811
812
813 #ifdef CONFIG_FTRACE_STARTUP_TEST
814         if (type->selftest && !tracing_selftest_disabled) {
815                 struct tracer *saved_tracer = current_trace;
816                 struct trace_array *tr = &global_trace;
817
818                 /*
819                  * Run a selftest on this tracer.
820                  * Here we reset the trace buffer, and set the current
821                  * tracer to be this tracer. The tracer can then run some
822                  * internal tracing to verify that everything is in order.
823                  * If we fail, we do not register this tracer.
824                  */
825                 tracing_reset_online_cpus(tr);
826
827                 current_trace = type;
828
829                 /* If we expanded the buffers, make sure the max is expanded too */
830                 if (ring_buffer_expanded && type->use_max_tr)
831                         ring_buffer_resize(max_tr.buffer, trace_buf_size,
832                                                 RING_BUFFER_ALL_CPUS);
833
834                 /* the test is responsible for initializing and enabling */
835                 pr_info("Testing tracer %s: ", type->name);
836                 ret = type->selftest(type, tr);
837                 /* the test is responsible for resetting too */
838                 current_trace = saved_tracer;
839                 if (ret) {
840                         printk(KERN_CONT "FAILED!\n");
841                         /* Add the warning after printing 'FAILED' */
842                         WARN_ON(1);
843                         goto out;
844                 }
845                 /* Only reset on passing, to avoid touching corrupted buffers */
846                 tracing_reset_online_cpus(tr);
847
848                 /* Shrink the max buffer again */
849                 if (ring_buffer_expanded && type->use_max_tr)
850                         ring_buffer_resize(max_tr.buffer, 1,
851                                                 RING_BUFFER_ALL_CPUS);
852
853                 printk(KERN_CONT "PASSED\n");
854         }
855 #endif
856
857         type->next = trace_types;
858         trace_types = type;
859
860  out:
861         tracing_selftest_running = false;
862         mutex_unlock(&trace_types_lock);
863
864         if (ret || !default_bootup_tracer)
865                 goto out_unlock;
866
867         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
868                 goto out_unlock;
869
870         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
871         /* Do we want this tracer to start on bootup? */
872         tracing_set_tracer(type->name);
873         default_bootup_tracer = NULL;
874         /* disable other selftests, since this will break it. */
875         tracing_selftest_disabled = 1;
876 #ifdef CONFIG_FTRACE_STARTUP_TEST
877         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
878                type->name);
879 #endif
880
881  out_unlock:
882         return ret;
883 }
884
885 void unregister_tracer(struct tracer *type)
886 {
887         struct tracer **t;
888
889         mutex_lock(&trace_types_lock);
890         for (t = &trace_types; *t; t = &(*t)->next) {
891                 if (*t == type)
892                         goto found;
893         }
894         pr_info("Tracer %s not registered\n", type->name);
895         goto out;
896
897  found:
898         *t = (*t)->next;
899
900         if (type == current_trace && tracer_enabled) {
901                 tracer_enabled = 0;
902                 tracing_stop();
903                 if (current_trace->stop)
904                         current_trace->stop(&global_trace);
905                 current_trace = &nop_trace;
906         }
907 out:
908         mutex_unlock(&trace_types_lock);
909 }
910
911 void tracing_reset(struct trace_array *tr, int cpu)
912 {
913         struct ring_buffer *buffer = tr->buffer;
914
915         ring_buffer_record_disable(buffer);
916
917         /* Make sure all commits have finished */
918         synchronize_sched();
919         ring_buffer_reset_cpu(buffer, cpu);
920
921         ring_buffer_record_enable(buffer);
922 }
923
924 void tracing_reset_online_cpus(struct trace_array *tr)
925 {
926         struct ring_buffer *buffer = tr->buffer;
927         int cpu;
928
929         ring_buffer_record_disable(buffer);
930
931         /* Make sure all commits have finished */
932         synchronize_sched();
933
934         tr->time_start = ftrace_now(tr->cpu);
935
936         for_each_online_cpu(cpu)
937                 ring_buffer_reset_cpu(buffer, cpu);
938
939         ring_buffer_record_enable(buffer);
940 }
941
942 void tracing_reset_current(int cpu)
943 {
944         tracing_reset(&global_trace, cpu);
945 }
946
947 void tracing_reset_current_online_cpus(void)
948 {
949         tracing_reset_online_cpus(&global_trace);
950 }
951
952 #define SAVED_CMDLINES 128
953 #define NO_CMDLINE_MAP UINT_MAX
954 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
955 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
956 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
957 static int cmdline_idx;
958 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
959
960 /* temporarily disable recording */
961 static atomic_t trace_record_cmdline_disabled __read_mostly;
962
963 static void trace_init_cmdlines(void)
964 {
965         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
966         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
967         cmdline_idx = 0;
968 }
969
970 int is_tracing_stopped(void)
971 {
972         return trace_stop_count;
973 }
974
975 /**
976  * ftrace_off_permanent - disable all ftrace code permanently
977  *
978  * This should only be called when a serious anomaly has
979  * been detected.  This will turn off function tracing,
980  * ring buffers, and other tracing utilities. It takes no
981  * locks and can be called from any context.
982  */
983 void ftrace_off_permanent(void)
984 {
985         tracing_disabled = 1;
986         ftrace_stop();
987         tracing_off_permanent();
988 }
989
990 /**
991  * tracing_start - quick start of the tracer
992  *
993  * If tracing is enabled but was stopped by tracing_stop,
994  * this will start the tracer back up.
995  */
996 void tracing_start(void)
997 {
998         struct ring_buffer *buffer;
999         unsigned long flags;
1000
1001         if (tracing_disabled)
1002                 return;
1003
1004         raw_spin_lock_irqsave(&tracing_start_lock, flags);
1005         if (--trace_stop_count) {
1006                 if (trace_stop_count < 0) {
1007                         /* Someone screwed up their debugging */
1008                         WARN_ON_ONCE(1);
1009                         trace_stop_count = 0;
1010                 }
1011                 goto out;
1012         }
1013
1014         /* Prevent the buffers from switching */
1015         arch_spin_lock(&ftrace_max_lock);
1016
1017         buffer = global_trace.buffer;
1018         if (buffer)
1019                 ring_buffer_record_enable(buffer);
1020
1021         buffer = max_tr.buffer;
1022         if (buffer)
1023                 ring_buffer_record_enable(buffer);
1024
1025         arch_spin_unlock(&ftrace_max_lock);
1026
1027         ftrace_start();
1028  out:
1029         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1030 }
1031
1032 /**
1033  * tracing_stop - quick stop of the tracer
1034  *
1035  * Lightweight way to stop tracing. Use in conjunction with
1036  * tracing_start.
1037  */
1038 void tracing_stop(void)
1039 {
1040         struct ring_buffer *buffer;
1041         unsigned long flags;
1042
1043         ftrace_stop();
1044         raw_spin_lock_irqsave(&tracing_start_lock, flags);
1045         if (trace_stop_count++)
1046                 goto out;
1047
1048         /* Prevent the buffers from switching */
1049         arch_spin_lock(&ftrace_max_lock);
1050
1051         buffer = global_trace.buffer;
1052         if (buffer)
1053                 ring_buffer_record_disable(buffer);
1054
1055         buffer = max_tr.buffer;
1056         if (buffer)
1057                 ring_buffer_record_disable(buffer);
1058
1059         arch_spin_unlock(&ftrace_max_lock);
1060
1061  out:
1062         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1063 }
1064
1065 void trace_stop_cmdline_recording(void);
1066
1067 static void trace_save_cmdline(struct task_struct *tsk)
1068 {
1069         unsigned pid, idx;
1070
1071         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1072                 return;
1073
1074         /*
1075          * It's not the end of the world if we don't get
1076          * the lock, but we also don't want to spin
1077          * nor do we want to disable interrupts,
1078          * so if we miss here, then better luck next time.
1079          */
1080         if (!arch_spin_trylock(&trace_cmdline_lock))
1081                 return;
1082
1083         idx = map_pid_to_cmdline[tsk->pid];
1084         if (idx == NO_CMDLINE_MAP) {
1085                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1086
1087                 /*
1088                  * Check whether the cmdline buffer at idx has a pid
1089                  * mapped. We are going to overwrite that entry so we
1090                  * need to clear the map_pid_to_cmdline. Otherwise we
1091                  * would read the new comm for the old pid.
1092                  */
1093                 pid = map_cmdline_to_pid[idx];
1094                 if (pid != NO_CMDLINE_MAP)
1095                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1096
1097                 map_cmdline_to_pid[idx] = tsk->pid;
1098                 map_pid_to_cmdline[tsk->pid] = idx;
1099
1100                 cmdline_idx = idx;
1101         }
1102
1103         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1104
1105         arch_spin_unlock(&trace_cmdline_lock);
1106 }
1107
1108 void trace_find_cmdline(int pid, char comm[])
1109 {
1110         unsigned map;
1111
1112         if (!pid) {
1113                 strcpy(comm, "<idle>");
1114                 return;
1115         }
1116
1117         if (WARN_ON_ONCE(pid < 0)) {
1118                 strcpy(comm, "<XXX>");
1119                 return;
1120         }
1121
1122         if (pid > PID_MAX_DEFAULT) {
1123                 strcpy(comm, "<...>");
1124                 return;
1125         }
1126
1127         preempt_disable();
1128         arch_spin_lock(&trace_cmdline_lock);
1129         map = map_pid_to_cmdline[pid];
1130         if (map != NO_CMDLINE_MAP)
1131                 strcpy(comm, saved_cmdlines[map]);
1132         else
1133                 strcpy(comm, "<...>");
1134
1135         arch_spin_unlock(&trace_cmdline_lock);
1136         preempt_enable();
1137 }
1138
1139 void tracing_record_cmdline(struct task_struct *tsk)
1140 {
1141         if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1142             !tracing_is_on())
1143                 return;
1144
1145         if (!__this_cpu_read(trace_cmdline_save))
1146                 return;
1147
1148         __this_cpu_write(trace_cmdline_save, false);
1149
1150         trace_save_cmdline(tsk);
1151 }
1152
1153 void
1154 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1155                              int pc)
1156 {
1157         struct task_struct *tsk = current;
1158
1159         entry->preempt_count            = pc & 0xff;
1160         entry->pid                      = (tsk) ? tsk->pid : 0;
1161         entry->padding                  = 0;
1162         entry->flags =
1163 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1164                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1165 #else
1166                 TRACE_FLAG_IRQS_NOSUPPORT |
1167 #endif
1168                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1169                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1170                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1171 }
1172 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1173
1174 struct ring_buffer_event *
1175 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1176                           int type,
1177                           unsigned long len,
1178                           unsigned long flags, int pc)
1179 {
1180         struct ring_buffer_event *event;
1181
1182         event = ring_buffer_lock_reserve(buffer, len);
1183         if (event != NULL) {
1184                 struct trace_entry *ent = ring_buffer_event_data(event);
1185
1186                 tracing_generic_entry_update(ent, flags, pc);
1187                 ent->type = type;
1188         }
1189
1190         return event;
1191 }
1192
1193 void
1194 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1195 {
1196         __this_cpu_write(trace_cmdline_save, true);
1197         ring_buffer_unlock_commit(buffer, event);
1198 }
1199
1200 static inline void
1201 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1202                              struct ring_buffer_event *event,
1203                              unsigned long flags, int pc,
1204                              int wake)
1205 {
1206         __buffer_unlock_commit(buffer, event);
1207
1208         ftrace_trace_stack(buffer, flags, 6, pc);
1209         ftrace_trace_userstack(buffer, flags, pc);
1210
1211         if (wake)
1212                 trace_wake_up();
1213 }
1214
1215 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1216                                 struct ring_buffer_event *event,
1217                                 unsigned long flags, int pc)
1218 {
1219         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1220 }
1221
1222 struct ring_buffer_event *
1223 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1224                                   int type, unsigned long len,
1225                                   unsigned long flags, int pc)
1226 {
1227         *current_rb = global_trace.buffer;
1228         return trace_buffer_lock_reserve(*current_rb,
1229                                          type, len, flags, pc);
1230 }
1231 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1232
1233 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1234                                         struct ring_buffer_event *event,
1235                                         unsigned long flags, int pc)
1236 {
1237         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1238 }
1239 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1240
1241 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1242                                        struct ring_buffer_event *event,
1243                                        unsigned long flags, int pc)
1244 {
1245         __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1246 }
1247 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1248
1249 void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1250                                             struct ring_buffer_event *event,
1251                                             unsigned long flags, int pc,
1252                                             struct pt_regs *regs)
1253 {
1254         __buffer_unlock_commit(buffer, event);
1255
1256         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1257         ftrace_trace_userstack(buffer, flags, pc);
1258 }
1259 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1260
1261 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1262                                          struct ring_buffer_event *event)
1263 {
1264         ring_buffer_discard_commit(buffer, event);
1265 }
1266 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1267
1268 void
1269 trace_function(struct trace_array *tr,
1270                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1271                int pc)
1272 {
1273         struct ftrace_event_call *call = &event_function;
1274         struct ring_buffer *buffer = tr->buffer;
1275         struct ring_buffer_event *event;
1276         struct ftrace_entry *entry;
1277
1278         /* If we are reading the ring buffer, don't trace */
1279         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1280                 return;
1281
1282         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1283                                           flags, pc);
1284         if (!event)
1285                 return;
1286         entry   = ring_buffer_event_data(event);
1287         entry->ip                       = ip;
1288         entry->parent_ip                = parent_ip;
1289
1290         if (!filter_check_discard(call, entry, buffer, event))
1291                 __buffer_unlock_commit(buffer, event);
1292 }
1293
1294 void
1295 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1296        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1297        int pc)
1298 {
1299         if (likely(!atomic_read(&data->disabled)))
1300                 trace_function(tr, ip, parent_ip, flags, pc);
1301 }
1302
1303 #ifdef CONFIG_STACKTRACE
1304
1305 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1306 struct ftrace_stack {
1307         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1308 };
1309
1310 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1311 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1312
1313 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1314                                  unsigned long flags,
1315                                  int skip, int pc, struct pt_regs *regs)
1316 {
1317         struct ftrace_event_call *call = &event_kernel_stack;
1318         struct ring_buffer_event *event;
1319         struct stack_entry *entry;
1320         struct stack_trace trace;
1321         int use_stack;
1322         int size = FTRACE_STACK_ENTRIES;
1323
1324         trace.nr_entries        = 0;
1325         trace.skip              = skip;
1326
1327         /*
1328          * Since events can happen in NMIs there's no safe way to
1329          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1330          * or NMI comes in, it will just have to use the default
1331          * FTRACE_STACK_SIZE.
1332          */
1333         preempt_disable_notrace();
1334
1335         use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1336         /*
1337          * We don't need any atomic variables, just a barrier.
1338          * If an interrupt comes in, we don't care, because it would
1339          * have exited and put the counter back to what we want.
1340          * We just need a barrier to keep gcc from moving things
1341          * around.
1342          */
1343         barrier();
1344         if (use_stack == 1) {
1345                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1346                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1347
1348                 if (regs)
1349                         save_stack_trace_regs(regs, &trace);
1350                 else
1351                         save_stack_trace(&trace);
1352
1353                 if (trace.nr_entries > size)
1354                         size = trace.nr_entries;
1355         } else
1356                 /* From now on, use_stack is a boolean */
1357                 use_stack = 0;
1358
1359         size *= sizeof(unsigned long);
1360
1361         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1362                                           sizeof(*entry) + size, flags, pc);
1363         if (!event)
1364                 goto out;
1365         entry = ring_buffer_event_data(event);
1366
1367         memset(&entry->caller, 0, size);
1368
1369         if (use_stack)
1370                 memcpy(&entry->caller, trace.entries,
1371                        trace.nr_entries * sizeof(unsigned long));
1372         else {
1373                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1374                 trace.entries           = entry->caller;
1375                 if (regs)
1376                         save_stack_trace_regs(regs, &trace);
1377                 else
1378                         save_stack_trace(&trace);
1379         }
1380
1381         entry->size = trace.nr_entries;
1382
1383         if (!filter_check_discard(call, entry, buffer, event))
1384                 __buffer_unlock_commit(buffer, event);
1385
1386  out:
1387         /* Again, don't let gcc optimize things here */
1388         barrier();
1389         __get_cpu_var(ftrace_stack_reserve)--;
1390         preempt_enable_notrace();
1391
1392 }
1393
1394 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1395                              int skip, int pc, struct pt_regs *regs)
1396 {
1397         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1398                 return;
1399
1400         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1401 }
1402
1403 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1404                         int skip, int pc)
1405 {
1406         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1407                 return;
1408
1409         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1410 }
1411
1412 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1413                    int pc)
1414 {
1415         __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1416 }
1417
1418 /**
1419  * trace_dump_stack - record a stack back trace in the trace buffer
1420  */
1421 void trace_dump_stack(void)
1422 {
1423         unsigned long flags;
1424
1425         if (tracing_disabled || tracing_selftest_running)
1426                 return;
1427
1428         local_save_flags(flags);
1429
1430         /* skipping 3 frames seems to get us to the caller of this function */
1431         __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1432 }
1433
1434 static DEFINE_PER_CPU(int, user_stack_count);
1435
1436 void
1437 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1438 {
1439         struct ftrace_event_call *call = &event_user_stack;
1440         struct ring_buffer_event *event;
1441         struct userstack_entry *entry;
1442         struct stack_trace trace;
1443
1444         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1445                 return;
1446
1447         /*
1448          * NMIs cannot handle page faults, even with fixups.
1449          * Saving the user stack can (and often does) fault.
1450          */
1451         if (unlikely(in_nmi()))
1452                 return;
1453
1454         /*
1455          * prevent recursion, since the user stack tracing may
1456          * trigger other kernel events.
1457          */
1458         preempt_disable();
1459         if (__this_cpu_read(user_stack_count))
1460                 goto out;
1461
1462         __this_cpu_inc(user_stack_count);
1463
1464         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1465                                           sizeof(*entry), flags, pc);
1466         if (!event)
1467                 goto out_drop_count;
1468         entry   = ring_buffer_event_data(event);
1469
1470         entry->tgid             = current->tgid;
1471         memset(&entry->caller, 0, sizeof(entry->caller));
1472
1473         trace.nr_entries        = 0;
1474         trace.max_entries       = FTRACE_STACK_ENTRIES;
1475         trace.skip              = 0;
1476         trace.entries           = entry->caller;
1477
1478         save_stack_trace_user(&trace);
1479         if (!filter_check_discard(call, entry, buffer, event))
1480                 __buffer_unlock_commit(buffer, event);
1481
1482  out_drop_count:
1483         __this_cpu_dec(user_stack_count);
1484  out:
1485         preempt_enable();
1486 }
1487
1488 #ifdef UNUSED
1489 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1490 {
1491         ftrace_trace_userstack(tr, flags, preempt_count());
1492 }
1493 #endif /* UNUSED */
1494
1495 #endif /* CONFIG_STACKTRACE */
1496
1497 /* created for use with alloc_percpu */
1498 struct trace_buffer_struct {
1499         char buffer[TRACE_BUF_SIZE];
1500 };
1501
1502 static struct trace_buffer_struct *trace_percpu_buffer;
1503 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1504 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1505 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1506
1507 /*
1508  * The buffer used depends on the context. There is a per cpu
1509  * buffer for normal context, softirq context, hard irq context and
1510  * for NMI context. This allows for lockless recording.
1511  *
1512  * Note, if the buffers failed to be allocated, then this returns NULL
1513  */
1514 static char *get_trace_buf(void)
1515 {
1516         struct trace_buffer_struct *percpu_buffer;
1517         struct trace_buffer_struct *buffer;
1518
1519         /*
1520          * If we have allocated per cpu buffers, then we do not
1521          * need to do any locking.
1522          */
1523         if (in_nmi())
1524                 percpu_buffer = trace_percpu_nmi_buffer;
1525         else if (in_irq())
1526                 percpu_buffer = trace_percpu_irq_buffer;
1527         else if (in_softirq())
1528                 percpu_buffer = trace_percpu_sirq_buffer;
1529         else
1530                 percpu_buffer = trace_percpu_buffer;
1531
1532         if (!percpu_buffer)
1533                 return NULL;
1534
1535         buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1536
1537         return buffer->buffer;
1538 }
1539
1540 static int alloc_percpu_trace_buffer(void)
1541 {
1542         struct trace_buffer_struct *buffers;
1543         struct trace_buffer_struct *sirq_buffers;
1544         struct trace_buffer_struct *irq_buffers;
1545         struct trace_buffer_struct *nmi_buffers;
1546
1547         buffers = alloc_percpu(struct trace_buffer_struct);
1548         if (!buffers)
1549                 goto err_warn;
1550
1551         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1552         if (!sirq_buffers)
1553                 goto err_sirq;
1554
1555         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1556         if (!irq_buffers)
1557                 goto err_irq;
1558
1559         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1560         if (!nmi_buffers)
1561                 goto err_nmi;
1562
1563         trace_percpu_buffer = buffers;
1564         trace_percpu_sirq_buffer = sirq_buffers;
1565         trace_percpu_irq_buffer = irq_buffers;
1566         trace_percpu_nmi_buffer = nmi_buffers;
1567
1568         return 0;
1569
1570  err_nmi:
1571         free_percpu(irq_buffers);
1572  err_irq:
1573         free_percpu(sirq_buffers);
1574  err_sirq:
1575         free_percpu(buffers);
1576  err_warn:
1577         WARN(1, "Could not allocate percpu trace_printk buffer");
1578         return -ENOMEM;
1579 }
1580
1581 static int buffers_allocated;
1582
1583 void trace_printk_init_buffers(void)
1584 {
1585         if (buffers_allocated)
1586                 return;
1587
1588         if (alloc_percpu_trace_buffer())
1589                 return;
1590
1591         pr_info("ftrace: Allocated trace_printk buffers\n");
1592
1593         /* Expand the buffers to set size */
1594         tracing_update_buffers();
1595
1596         buffers_allocated = 1;
1597
1598         /*
1599          * trace_printk_init_buffers() can be called by modules.
1600          * If that happens, then we need to start cmdline recording
1601          * directly here. If the global_trace.buffer is already
1602          * allocated here, then this was called by module code.
1603          */
1604         if (global_trace.buffer)
1605                 tracing_start_cmdline_record();
1606 }
1607
1608 void trace_printk_start_comm(void)
1609 {
1610         /* Start tracing comms if trace printk is set */
1611         if (!buffers_allocated)
1612                 return;
1613         tracing_start_cmdline_record();
1614 }
1615
1616 static void trace_printk_start_stop_comm(int enabled)
1617 {
1618         if (!buffers_allocated)
1619                 return;
1620
1621         if (enabled)
1622                 tracing_start_cmdline_record();
1623         else
1624                 tracing_stop_cmdline_record();
1625 }
1626
1627 /**
1628  * trace_vbprintk - write binary msg to tracing buffer
1629  *
1630  */
1631 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1632 {
1633         struct ftrace_event_call *call = &event_bprint;
1634         struct ring_buffer_event *event;
1635         struct ring_buffer *buffer;
1636         struct trace_array *tr = &global_trace;
1637         struct bprint_entry *entry;
1638         unsigned long flags;
1639         char *tbuffer;
1640         int len = 0, size, pc;
1641
1642         if (unlikely(tracing_selftest_running || tracing_disabled))
1643                 return 0;
1644
1645         /* Don't pollute graph traces with trace_vprintk internals */
1646         pause_graph_tracing();
1647
1648         pc = preempt_count();
1649         preempt_disable_notrace();
1650
1651         tbuffer = get_trace_buf();
1652         if (!tbuffer) {
1653                 len = 0;
1654                 goto out;
1655         }
1656
1657         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1658
1659         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1660                 goto out;
1661
1662         local_save_flags(flags);
1663         size = sizeof(*entry) + sizeof(u32) * len;
1664         buffer = tr->buffer;
1665         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1666                                           flags, pc);
1667         if (!event)
1668                 goto out;
1669         entry = ring_buffer_event_data(event);
1670         entry->ip                       = ip;
1671         entry->fmt                      = fmt;
1672
1673         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1674         if (!filter_check_discard(call, entry, buffer, event)) {
1675                 __buffer_unlock_commit(buffer, event);
1676                 ftrace_trace_stack(buffer, flags, 6, pc);
1677         }
1678
1679 out:
1680         preempt_enable_notrace();
1681         unpause_graph_tracing();
1682
1683         return len;
1684 }
1685 EXPORT_SYMBOL_GPL(trace_vbprintk);
1686
1687 int trace_array_printk(struct trace_array *tr,
1688                        unsigned long ip, const char *fmt, ...)
1689 {
1690         int ret;
1691         va_list ap;
1692
1693         if (!(trace_flags & TRACE_ITER_PRINTK))
1694                 return 0;
1695
1696         va_start(ap, fmt);
1697         ret = trace_array_vprintk(tr, ip, fmt, ap);
1698         va_end(ap);
1699         return ret;
1700 }
1701
1702 int trace_array_vprintk(struct trace_array *tr,
1703                         unsigned long ip, const char *fmt, va_list args)
1704 {
1705         struct ftrace_event_call *call = &event_print;
1706         struct ring_buffer_event *event;
1707         struct ring_buffer *buffer;
1708         int len = 0, size, pc;
1709         struct print_entry *entry;
1710         unsigned long flags;
1711         char *tbuffer;
1712
1713         if (tracing_disabled || tracing_selftest_running)
1714                 return 0;
1715
1716         /* Don't pollute graph traces with trace_vprintk internals */
1717         pause_graph_tracing();
1718
1719         pc = preempt_count();
1720         preempt_disable_notrace();
1721
1722
1723         tbuffer = get_trace_buf();
1724         if (!tbuffer) {
1725                 len = 0;
1726                 goto out;
1727         }
1728
1729         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1730         if (len > TRACE_BUF_SIZE)
1731                 goto out;
1732
1733         local_save_flags(flags);
1734         size = sizeof(*entry) + len + 1;
1735         buffer = tr->buffer;
1736         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1737                                           flags, pc);
1738         if (!event)
1739                 goto out;
1740         entry = ring_buffer_event_data(event);
1741         entry->ip = ip;
1742
1743         memcpy(&entry->buf, tbuffer, len);
1744         entry->buf[len] = '\0';
1745         if (!filter_check_discard(call, entry, buffer, event)) {
1746                 __buffer_unlock_commit(buffer, event);
1747                 ftrace_trace_stack(buffer, flags, 6, pc);
1748         }
1749  out:
1750         preempt_enable_notrace();
1751         unpause_graph_tracing();
1752
1753         return len;
1754 }
1755
1756 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1757 {
1758         return trace_array_vprintk(&global_trace, ip, fmt, args);
1759 }
1760 EXPORT_SYMBOL_GPL(trace_vprintk);
1761
1762 static void trace_iterator_increment(struct trace_iterator *iter)
1763 {
1764         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
1765
1766         iter->idx++;
1767         if (buf_iter)
1768                 ring_buffer_read(buf_iter, NULL);
1769 }
1770
1771 static struct trace_entry *
1772 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1773                 unsigned long *lost_events)
1774 {
1775         struct ring_buffer_event *event;
1776         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
1777
1778         if (buf_iter)
1779                 event = ring_buffer_iter_peek(buf_iter, ts);
1780         else
1781                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1782                                          lost_events);
1783
1784         if (event) {
1785                 iter->ent_size = ring_buffer_event_length(event);
1786                 return ring_buffer_event_data(event);
1787         }
1788         iter->ent_size = 0;
1789         return NULL;
1790 }
1791
1792 static struct trace_entry *
1793 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1794                   unsigned long *missing_events, u64 *ent_ts)
1795 {
1796         struct ring_buffer *buffer = iter->tr->buffer;
1797         struct trace_entry *ent, *next = NULL;
1798         unsigned long lost_events = 0, next_lost = 0;
1799         int cpu_file = iter->cpu_file;
1800         u64 next_ts = 0, ts;
1801         int next_cpu = -1;
1802         int next_size = 0;
1803         int cpu;
1804
1805         /*
1806          * If we are in a per_cpu trace file, don't bother iterating over
1807          * all cpus; just peek directly at that cpu's buffer.
1808          */
1809         if (cpu_file > TRACE_PIPE_ALL_CPU) {
1810                 if (ring_buffer_empty_cpu(buffer, cpu_file))
1811                         return NULL;
1812                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1813                 if (ent_cpu)
1814                         *ent_cpu = cpu_file;
1815
1816                 return ent;
1817         }
1818
1819         for_each_tracing_cpu(cpu) {
1820
1821                 if (ring_buffer_empty_cpu(buffer, cpu))
1822                         continue;
1823
1824                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1825
1826                 /*
1827                  * Pick the entry with the smallest timestamp:
1828                  */
1829                 if (ent && (!next || ts < next_ts)) {
1830                         next = ent;
1831                         next_cpu = cpu;
1832                         next_ts = ts;
1833                         next_lost = lost_events;
1834                         next_size = iter->ent_size;
1835                 }
1836         }
1837
1838         iter->ent_size = next_size;
1839
1840         if (ent_cpu)
1841                 *ent_cpu = next_cpu;
1842
1843         if (ent_ts)
1844                 *ent_ts = next_ts;
1845
1846         if (missing_events)
1847                 *missing_events = next_lost;
1848
1849         return next;
1850 }
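/*
 * __find_next_entry() is effectively a k-way merge across the per-cpu ring
 * buffers: unless the iterator is bound to a single per-cpu file, each
 * non-empty cpu is peeked (without consuming) and the entry with the
 * smallest timestamp wins.  For example, if cpu 0's next entry has ts = 105
 * and cpu 1's has ts = 100, the cpu 1 entry is returned with *ent_cpu = 1
 * and *ent_ts = 100.
 */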
1851
1852 /* Find the next real entry, without updating the iterator itself */
1853 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1854                                           int *ent_cpu, u64 *ent_ts)
1855 {
1856         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1857 }
1858
1859 /* Find the next real entry, and increment the iterator to the next entry */
1860 void *trace_find_next_entry_inc(struct trace_iterator *iter)
1861 {
1862         iter->ent = __find_next_entry(iter, &iter->cpu,
1863                                       &iter->lost_events, &iter->ts);
1864
1865         if (iter->ent)
1866                 trace_iterator_increment(iter);
1867
1868         return iter->ent ? iter : NULL;
1869 }
1870
1871 static void trace_consume(struct trace_iterator *iter)
1872 {
1873         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1874                             &iter->lost_events);
1875 }
1876
1877 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1878 {
1879         struct trace_iterator *iter = m->private;
1880         int i = (int)*pos;
1881         void *ent;
1882
1883         WARN_ON_ONCE(iter->leftover);
1884
1885         (*pos)++;
1886
1887         /* can't go backwards */
1888         if (iter->idx > i)
1889                 return NULL;
1890
1891         if (iter->idx < 0)
1892                 ent = trace_find_next_entry_inc(iter);
1893         else
1894                 ent = iter;
1895
1896         while (ent && iter->idx < i)
1897                 ent = trace_find_next_entry_inc(iter);
1898
1899         iter->pos = *pos;
1900
1901         return ent;
1902 }
1903
1904 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1905 {
1906         struct trace_array *tr = iter->tr;
1907         struct ring_buffer_event *event;
1908         struct ring_buffer_iter *buf_iter;
1909         unsigned long entries = 0;
1910         u64 ts;
1911
1912         tr->data[cpu]->skipped_entries = 0;
1913
1914         buf_iter = trace_buffer_iter(iter, cpu);
1915         if (!buf_iter)
1916                 return;
1917
1918         ring_buffer_iter_reset(buf_iter);
1919
1920         /*
1921          * With the max latency tracers, a reset may never have
1922          * taken place on a cpu. This is evident when the timestamp
1923          * is before the start of the buffer.
1924          */
1925         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1926                 if (ts >= iter->tr->time_start)
1927                         break;
1928                 entries++;
1929                 ring_buffer_read(buf_iter, NULL);
1930         }
1931
1932         tr->data[cpu]->skipped_entries = entries;
1933 }
1934
1935 /*
1936  * The current tracer is copied to avoid holding a global lock
1937  * all around.
1938  */
1939 static void *s_start(struct seq_file *m, loff_t *pos)
1940 {
1941         struct trace_iterator *iter = m->private;
1942         static struct tracer *old_tracer;
1943         int cpu_file = iter->cpu_file;
1944         void *p = NULL;
1945         loff_t l = 0;
1946         int cpu;
1947
1948         /* copy the tracer to avoid using a global lock all around */
1949         mutex_lock(&trace_types_lock);
1950         if (unlikely(old_tracer != current_trace && current_trace)) {
1951                 old_tracer = current_trace;
1952                 *iter->trace = *current_trace;
1953         }
1954         mutex_unlock(&trace_types_lock);
1955
1956         atomic_inc(&trace_record_cmdline_disabled);
1957
1958         if (*pos != iter->pos) {
1959                 iter->ent = NULL;
1960                 iter->cpu = 0;
1961                 iter->idx = -1;
1962
1963                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1964                         for_each_tracing_cpu(cpu)
1965                                 tracing_iter_reset(iter, cpu);
1966                 } else
1967                         tracing_iter_reset(iter, cpu_file);
1968
1969                 iter->leftover = 0;
1970                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1971                         ;
1972
1973         } else {
1974                 /*
1975                  * If we overflowed the seq_file before, then we want
1976                  * to just reuse the trace_seq buffer again.
1977                  */
1978                 if (iter->leftover)
1979                         p = iter;
1980                 else {
1981                         l = *pos - 1;
1982                         p = s_next(m, p, &l);
1983                 }
1984         }
1985
1986         trace_event_read_lock();
1987         trace_access_lock(cpu_file);
1988         return p;
1989 }
1990
1991 static void s_stop(struct seq_file *m, void *p)
1992 {
1993         struct trace_iterator *iter = m->private;
1994
1995         atomic_dec(&trace_record_cmdline_disabled);
1996         trace_access_unlock(iter->cpu_file);
1997         trace_event_read_unlock();
1998 }
1999
2000 static void
2001 get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
2002 {
2003         unsigned long count;
2004         int cpu;
2005
2006         *total = 0;
2007         *entries = 0;
2008
2009         for_each_tracing_cpu(cpu) {
2010                 count = ring_buffer_entries_cpu(tr->buffer, cpu);
2011                 /*
2012                  * If this buffer has skipped entries, then we hold all
2013                  * entries for the trace and we need to ignore the
2014                  * ones before the time stamp.
2015                  */
2016                 if (tr->data[cpu]->skipped_entries) {
2017                         count -= tr->data[cpu]->skipped_entries;
2018                         /* total is the same as the entries */
2019                         *total += count;
2020                 } else
2021                         *total += count +
2022                                 ring_buffer_overrun_cpu(tr->buffer, cpu);
2023                 *entries += count;
2024         }
2025 }
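/*
 * A small worked example for the counters above: if a cpu buffer holds
 * 1000 entries and the ring buffer reports 200 overruns (overwritten
 * entries), that cpu contributes 1000 to *entries and 1200 to *total.
 * When skipped_entries is set (latency tracers whose buffer was never
 * reset), the skipped count is subtracted instead and total == entries.
 */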
2026
2027 static void print_lat_help_header(struct seq_file *m)
2028 {
2029         seq_puts(m, "#                  _------=> CPU#            \n");
2030         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2031         seq_puts(m, "#                | / _----=> need-resched    \n");
2032         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2033         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2034         seq_puts(m, "#                |||| /     delay             \n");
2035         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2036         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2037 }
2038
2039 static void print_event_info(struct trace_array *tr, struct seq_file *m)
2040 {
2041         unsigned long total;
2042         unsigned long entries;
2043
2044         get_total_entries(tr, &total, &entries);
2045         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2046                    entries, total, num_online_cpus());
2047         seq_puts(m, "#\n");
2048 }
2049
2050 static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
2051 {
2052         print_event_info(tr, m);
2053         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2054         seq_puts(m, "#              | |       |          |         |\n");
2055 }
2056
2057 static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2058 {
2059         print_event_info(tr, m);
2060         seq_puts(m, "#                              _-----=> irqs-off\n");
2061         seq_puts(m, "#                             / _----=> need-resched\n");
2062         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2063         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2064         seq_puts(m, "#                            ||| /     delay\n");
2065         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2066         seq_puts(m, "#              | |       |   ||||       |         |\n");
2067 }
2068
2069 void
2070 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2071 {
2072         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2073         struct trace_array *tr = iter->tr;
2074         struct trace_array_cpu *data = tr->data[tr->cpu];
2075         struct tracer *type = current_trace;
2076         unsigned long entries;
2077         unsigned long total;
2078         const char *name = "preemption";
2079
2080         if (type)
2081                 name = type->name;
2082
2083         get_total_entries(tr, &total, &entries);
2084
2085         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2086                    name, UTS_RELEASE);
2087         seq_puts(m, "# -----------------------------------"
2088                  "---------------------------------\n");
2089         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2090                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2091                    nsecs_to_usecs(data->saved_latency),
2092                    entries,
2093                    total,
2094                    tr->cpu,
2095 #if defined(CONFIG_PREEMPT_NONE)
2096                    "server",
2097 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2098                    "desktop",
2099 #elif defined(CONFIG_PREEMPT)
2100                    "preempt",
2101 #else
2102                    "unknown",
2103 #endif
2104                    /* These are reserved for later use */
2105                    0, 0, 0, 0);
2106 #ifdef CONFIG_SMP
2107         seq_printf(m, " #P:%d)\n", num_online_cpus());
2108 #else
2109         seq_puts(m, ")\n");
2110 #endif
2111         seq_puts(m, "#    -----------------\n");
2112         seq_printf(m, "#    | task: %.16s-%d "
2113                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2114                    data->comm, data->pid,
2115                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2116                    data->policy, data->rt_priority);
2117         seq_puts(m, "#    -----------------\n");
2118
2119         if (data->critical_start) {
2120                 seq_puts(m, "#  => started at: ");
2121                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2122                 trace_print_seq(m, &iter->seq);
2123                 seq_puts(m, "\n#  => ended at:   ");
2124                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2125                 trace_print_seq(m, &iter->seq);
2126                 seq_puts(m, "\n#\n");
2127         }
2128
2129         seq_puts(m, "#\n");
2130 }
2131
2132 static void test_cpu_buff_start(struct trace_iterator *iter)
2133 {
2134         struct trace_seq *s = &iter->seq;
2135
2136         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2137                 return;
2138
2139         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2140                 return;
2141
2142         if (cpumask_test_cpu(iter->cpu, iter->started))
2143                 return;
2144
2145         if (iter->tr->data[iter->cpu]->skipped_entries)
2146                 return;
2147
2148         cpumask_set_cpu(iter->cpu, iter->started);
2149
2150         /* Don't print started cpu buffer for the first entry of the trace */
2151         if (iter->idx > 1)
2152                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2153                                 iter->cpu);
2154 }
2155
2156 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2157 {
2158         struct trace_seq *s = &iter->seq;
2159         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2160         struct trace_entry *entry;
2161         struct trace_event *event;
2162
2163         entry = iter->ent;
2164
2165         test_cpu_buff_start(iter);
2166
2167         event = ftrace_find_event(entry->type);
2168
2169         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2170                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2171                         if (!trace_print_lat_context(iter))
2172                                 goto partial;
2173                 } else {
2174                         if (!trace_print_context(iter))
2175                                 goto partial;
2176                 }
2177         }
2178
2179         if (event)
2180                 return event->funcs->trace(iter, sym_flags, event);
2181
2182         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2183                 goto partial;
2184
2185         return TRACE_TYPE_HANDLED;
2186 partial:
2187         return TRACE_TYPE_PARTIAL_LINE;
2188 }
2189
2190 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2191 {
2192         struct trace_seq *s = &iter->seq;
2193         struct trace_entry *entry;
2194         struct trace_event *event;
2195
2196         entry = iter->ent;
2197
2198         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2199                 if (!trace_seq_printf(s, "%d %d %llu ",
2200                                       entry->pid, iter->cpu, iter->ts))
2201                         goto partial;
2202         }
2203
2204         event = ftrace_find_event(entry->type);
2205         if (event)
2206                 return event->funcs->raw(iter, 0, event);
2207
2208         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2209                 goto partial;
2210
2211         return TRACE_TYPE_HANDLED;
2212 partial:
2213         return TRACE_TYPE_PARTIAL_LINE;
2214 }
2215
2216 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2217 {
2218         struct trace_seq *s = &iter->seq;
2219         unsigned char newline = '\n';
2220         struct trace_entry *entry;
2221         struct trace_event *event;
2222
2223         entry = iter->ent;
2224
2225         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2226                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2227                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2228                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2229         }
2230
2231         event = ftrace_find_event(entry->type);
2232         if (event) {
2233                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2234                 if (ret != TRACE_TYPE_HANDLED)
2235                         return ret;
2236         }
2237
2238         SEQ_PUT_FIELD_RET(s, newline);
2239
2240         return TRACE_TYPE_HANDLED;
2241 }
2242
2243 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2244 {
2245         struct trace_seq *s = &iter->seq;
2246         struct trace_entry *entry;
2247         struct trace_event *event;
2248
2249         entry = iter->ent;
2250
2251         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2252                 SEQ_PUT_FIELD_RET(s, entry->pid);
2253                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2254                 SEQ_PUT_FIELD_RET(s, iter->ts);
2255         }
2256
2257         event = ftrace_find_event(entry->type);
2258         return event ? event->funcs->binary(iter, 0, event) :
2259                 TRACE_TYPE_HANDLED;
2260 }
2261
2262 int trace_empty(struct trace_iterator *iter)
2263 {
2264         struct ring_buffer_iter *buf_iter;
2265         int cpu;
2266
2267         /* If we are looking at one CPU buffer, only check that one */
2268         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2269                 cpu = iter->cpu_file;
2270                 buf_iter = trace_buffer_iter(iter, cpu);
2271                 if (buf_iter) {
2272                         if (!ring_buffer_iter_empty(buf_iter))
2273                                 return 0;
2274                 } else {
2275                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2276                                 return 0;
2277                 }
2278                 return 1;
2279         }
2280
2281         for_each_tracing_cpu(cpu) {
2282                 buf_iter = trace_buffer_iter(iter, cpu);
2283                 if (buf_iter) {
2284                         if (!ring_buffer_iter_empty(buf_iter))
2285                                 return 0;
2286                 } else {
2287                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2288                                 return 0;
2289                 }
2290         }
2291
2292         return 1;
2293 }
2294
2295 /*  Called with trace_event_read_lock() held. */
2296 enum print_line_t print_trace_line(struct trace_iterator *iter)
2297 {
2298         enum print_line_t ret;
2299
2300         if (iter->lost_events &&
2301             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2302                                  iter->cpu, iter->lost_events))
2303                 return TRACE_TYPE_PARTIAL_LINE;
2304
2305         if (iter->trace && iter->trace->print_line) {
2306                 ret = iter->trace->print_line(iter);
2307                 if (ret != TRACE_TYPE_UNHANDLED)
2308                         return ret;
2309         }
2310
2311         if (iter->ent->type == TRACE_BPRINT &&
2312                         trace_flags & TRACE_ITER_PRINTK &&
2313                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2314                 return trace_print_bprintk_msg_only(iter);
2315
2316         if (iter->ent->type == TRACE_PRINT &&
2317                         trace_flags & TRACE_ITER_PRINTK &&
2318                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2319                 return trace_print_printk_msg_only(iter);
2320
2321         if (trace_flags & TRACE_ITER_BIN)
2322                 return print_bin_fmt(iter);
2323
2324         if (trace_flags & TRACE_ITER_HEX)
2325                 return print_hex_fmt(iter);
2326
2327         if (trace_flags & TRACE_ITER_RAW)
2328                 return print_raw_fmt(iter);
2329
2330         return print_trace_fmt(iter);
2331 }
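/*
 * Output format selection above, in order of precedence: a "LOST ... EVENTS"
 * warning if the ring buffer overwrote data, the tracer's own print_line()
 * callback, the printk-msg-only shortcuts for bprint/print entries, then the
 * bin, hex and raw formats, and finally the default human-readable format.
 */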
2332
2333 void trace_latency_header(struct seq_file *m)
2334 {
2335         struct trace_iterator *iter = m->private;
2336
2337         /* print nothing if the buffers are empty */
2338         if (trace_empty(iter))
2339                 return;
2340
2341         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2342                 print_trace_header(m, iter);
2343
2344         if (!(trace_flags & TRACE_ITER_VERBOSE))
2345                 print_lat_help_header(m);
2346 }
2347
2348 void trace_default_header(struct seq_file *m)
2349 {
2350         struct trace_iterator *iter = m->private;
2351
2352         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2353                 return;
2354
2355         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2356                 /* print nothing if the buffers are empty */
2357                 if (trace_empty(iter))
2358                         return;
2359                 print_trace_header(m, iter);
2360                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2361                         print_lat_help_header(m);
2362         } else {
2363                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2364                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2365                                 print_func_help_header_irq(iter->tr, m);
2366                         else
2367                                 print_func_help_header(iter->tr, m);
2368                 }
2369         }
2370 }
2371
2372 static void test_ftrace_alive(struct seq_file *m)
2373 {
2374         if (!ftrace_is_dead())
2375                 return;
2376         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2377         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2378 }
2379
2380 static int s_show(struct seq_file *m, void *v)
2381 {
2382         struct trace_iterator *iter = v;
2383         int ret;
2384
2385         if (iter->ent == NULL) {
2386                 if (iter->tr) {
2387                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2388                         seq_puts(m, "#\n");
2389                         test_ftrace_alive(m);
2390                 }
2391                 if (iter->trace && iter->trace->print_header)
2392                         iter->trace->print_header(m);
2393                 else
2394                         trace_default_header(m);
2395
2396         } else if (iter->leftover) {
2397                 /*
2398                  * If we filled the seq_file buffer earlier, we
2399                  * want to just show it now.
2400                  */
2401                 ret = trace_print_seq(m, &iter->seq);
2402
2403                 /* ret should this time be zero, but you never know */
2404                 iter->leftover = ret;
2405
2406         } else {
2407                 print_trace_line(iter);
2408                 ret = trace_print_seq(m, &iter->seq);
2409                 /*
2410                  * If we overflow the seq_file buffer, then it will
2411                  * ask us for this data again at start up.
2412                  * Use that instead.
2413                  *  ret is 0 if seq_file write succeeded.
2414                  *        -1 otherwise.
2415                  */
2416                 iter->leftover = ret;
2417         }
2418
2419         return 0;
2420 }
2421
2422 static const struct seq_operations tracer_seq_ops = {
2423         .start          = s_start,
2424         .next           = s_next,
2425         .stop           = s_stop,
2426         .show           = s_show,
2427 };
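/*
 * tracer_seq_ops drives the "trace" file through the seq_file interface:
 * s_start() snapshots the current tracer and positions the iterator,
 * s_next() advances it entry by entry, s_show() formats one line (or the
 * headers when iter->ent is NULL), and s_stop() drops the locks taken in
 * s_start().
 */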
2428
2429 static struct trace_iterator *
2430 __tracing_open(struct inode *inode, struct file *file)
2431 {
2432         long cpu_file = (long) inode->i_private;
2433         struct trace_iterator *iter;
2434         int cpu;
2435
2436         if (tracing_disabled)
2437                 return ERR_PTR(-ENODEV);
2438
2439         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2440         if (!iter)
2441                 return ERR_PTR(-ENOMEM);
2442
2443         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2444                                     GFP_KERNEL);
2445         if (!iter->buffer_iter)
2446                 goto release;
2447
2448         /*
2449          * We make a copy of the current tracer to avoid concurrent
2450          * changes on it while we are reading.
2451          */
2452         mutex_lock(&trace_types_lock);
2453         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2454         if (!iter->trace)
2455                 goto fail;
2456
2457         if (current_trace)
2458                 *iter->trace = *current_trace;
2459
2460         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2461                 goto fail;
2462
2463         if (current_trace && current_trace->print_max)
2464                 iter->tr = &max_tr;
2465         else
2466                 iter->tr = &global_trace;
2467         iter->pos = -1;
2468         mutex_init(&iter->mutex);
2469         iter->cpu_file = cpu_file;
2470
2471         /* Notify the tracer early; before we stop tracing. */
2472         if (iter->trace && iter->trace->open)
2473                 iter->trace->open(iter);
2474
2475         /* Annotate start of buffers if we had overruns */
2476         if (ring_buffer_overruns(iter->tr->buffer))
2477                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2478
2479         /* stop the trace while dumping */
2480         tracing_stop();
2481
2482         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2483                 for_each_tracing_cpu(cpu) {
2484                         iter->buffer_iter[cpu] =
2485                                 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2486                 }
2487                 ring_buffer_read_prepare_sync();
2488                 for_each_tracing_cpu(cpu) {
2489                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2490                         tracing_iter_reset(iter, cpu);
2491                 }
2492         } else {
2493                 cpu = iter->cpu_file;
2494                 iter->buffer_iter[cpu] =
2495                         ring_buffer_read_prepare(iter->tr->buffer, cpu);
2496                 ring_buffer_read_prepare_sync();
2497                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2498                 tracing_iter_reset(iter, cpu);
2499         }
2500
2501         mutex_unlock(&trace_types_lock);
2502
2503         return iter;
2504
2505  fail:
2506         mutex_unlock(&trace_types_lock);
2507         kfree(iter->trace);
2508         kfree(iter->buffer_iter);
2509 release:
2510         seq_release_private(inode, file);
2511         return ERR_PTR(-ENOMEM);
2512 }
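/*
 * Note that __tracing_open() calls tracing_stop() so the buffers do not
 * change while the trace file is being read; the matching tracing_start()
 * happens in tracing_release() when the file is closed.
 */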
2513
2514 int tracing_open_generic(struct inode *inode, struct file *filp)
2515 {
2516         if (tracing_disabled)
2517                 return -ENODEV;
2518
2519         filp->private_data = inode->i_private;
2520         return 0;
2521 }
2522
2523 static int tracing_release(struct inode *inode, struct file *file)
2524 {
2525         struct seq_file *m = file->private_data;
2526         struct trace_iterator *iter;
2527         int cpu;
2528
2529         if (!(file->f_mode & FMODE_READ))
2530                 return 0;
2531
2532         iter = m->private;
2533
2534         mutex_lock(&trace_types_lock);
2535         for_each_tracing_cpu(cpu) {
2536                 if (iter->buffer_iter[cpu])
2537                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2538         }
2539
2540         if (iter->trace && iter->trace->close)
2541                 iter->trace->close(iter);
2542
2543         /* reenable tracing if it was previously enabled */
2544         tracing_start();
2545         mutex_unlock(&trace_types_lock);
2546
2547         mutex_destroy(&iter->mutex);
2548         free_cpumask_var(iter->started);
2549         kfree(iter->trace);
2550         kfree(iter->buffer_iter);
2551         seq_release_private(inode, file);
2552         return 0;
2553 }
2554
2555 static int tracing_open(struct inode *inode, struct file *file)
2556 {
2557         struct trace_iterator *iter;
2558         int ret = 0;
2559
2560         /* If this file was open for write, then erase contents */
2561         if ((file->f_mode & FMODE_WRITE) &&
2562             (file->f_flags & O_TRUNC)) {
2563                 long cpu = (long) inode->i_private;
2564
2565                 if (cpu == TRACE_PIPE_ALL_CPU)
2566                         tracing_reset_online_cpus(&global_trace);
2567                 else
2568                         tracing_reset(&global_trace, cpu);
2569         }
2570
2571         if (file->f_mode & FMODE_READ) {
2572                 iter = __tracing_open(inode, file);
2573                 if (IS_ERR(iter))
2574                         ret = PTR_ERR(iter);
2575                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2576                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2577         }
2578         return ret;
2579 }
2580
2581 static void *
2582 t_next(struct seq_file *m, void *v, loff_t *pos)
2583 {
2584         struct tracer *t = v;
2585
2586         (*pos)++;
2587
2588         if (t)
2589                 t = t->next;
2590
2591         return t;
2592 }
2593
2594 static void *t_start(struct seq_file *m, loff_t *pos)
2595 {
2596         struct tracer *t;
2597         loff_t l = 0;
2598
2599         mutex_lock(&trace_types_lock);
2600         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2601                 ;
2602
2603         return t;
2604 }
2605
2606 static void t_stop(struct seq_file *m, void *p)
2607 {
2608         mutex_unlock(&trace_types_lock);
2609 }
2610
2611 static int t_show(struct seq_file *m, void *v)
2612 {
2613         struct tracer *t = v;
2614
2615         if (!t)
2616                 return 0;
2617
2618         seq_printf(m, "%s", t->name);
2619         if (t->next)
2620                 seq_putc(m, ' ');
2621         else
2622                 seq_putc(m, '\n');
2623
2624         return 0;
2625 }
2626
2627 static const struct seq_operations show_traces_seq_ops = {
2628         .start          = t_start,
2629         .next           = t_next,
2630         .stop           = t_stop,
2631         .show           = t_show,
2632 };
2633
2634 static int show_traces_open(struct inode *inode, struct file *file)
2635 {
2636         if (tracing_disabled)
2637                 return -ENODEV;
2638
2639         return seq_open(file, &show_traces_seq_ops);
2640 }
2641
2642 static ssize_t
2643 tracing_write_stub(struct file *filp, const char __user *ubuf,
2644                    size_t count, loff_t *ppos)
2645 {
2646         return count;
2647 }
2648
2649 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2650 {
2651         if (file->f_mode & FMODE_READ)
2652                 return seq_lseek(file, offset, origin);
2653         else
2654                 return 0;
2655 }
2656
2657 static const struct file_operations tracing_fops = {
2658         .open           = tracing_open,
2659         .read           = seq_read,
2660         .write          = tracing_write_stub,
2661         .llseek         = tracing_seek,
2662         .release        = tracing_release,
2663 };
2664
2665 static const struct file_operations show_traces_fops = {
2666         .open           = show_traces_open,
2667         .read           = seq_read,
2668         .release        = seq_release,
2669         .llseek         = seq_lseek,
2670 };
2671
2672 /*
2673  * Only trace on a CPU if the bitmask is set:
2674  */
2675 static cpumask_var_t tracing_cpumask;
2676
2677 /*
2678  * The tracer itself will not take this lock, but still we want
2679  * to provide a consistent cpumask to user-space:
2680  */
2681 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2682
2683 /*
2684  * Temporary storage for the character representation of the
2685  * CPU bitmask (and one more byte for the newline):
2686  */
2687 static char mask_str[NR_CPUS + 1];
2688
2689 static ssize_t
2690 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2691                      size_t count, loff_t *ppos)
2692 {
2693         int len;
2694
2695         mutex_lock(&tracing_cpumask_update_lock);
2696
2697         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2698         if (count - len < 2) {
2699                 count = -EINVAL;
2700                 goto out_err;
2701         }
2702         len += sprintf(mask_str + len, "\n");
2703         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2704
2705 out_err:
2706         mutex_unlock(&tracing_cpumask_update_lock);
2707
2708         return count;
2709 }
2710
2711 static ssize_t
2712 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2713                       size_t count, loff_t *ppos)
2714 {
2715         int err, cpu;
2716         cpumask_var_t tracing_cpumask_new;
2717
2718         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2719                 return -ENOMEM;
2720
2721         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2722         if (err)
2723                 goto err_unlock;
2724
2725         mutex_lock(&tracing_cpumask_update_lock);
2726
2727         local_irq_disable();
2728         arch_spin_lock(&ftrace_max_lock);
2729         for_each_tracing_cpu(cpu) {
2730                 /*
2731                  * Increase/decrease the disabled counter if we are
2732                  * about to flip a bit in the cpumask:
2733                  */
2734                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2735                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2736                         atomic_inc(&global_trace.data[cpu]->disabled);
2737                         ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2738                 }
2739                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2740                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2741                         atomic_dec(&global_trace.data[cpu]->disabled);
2742                         ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2743                 }
2744         }
2745         arch_spin_unlock(&ftrace_max_lock);
2746         local_irq_enable();
2747
2748         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2749
2750         mutex_unlock(&tracing_cpumask_update_lock);
2751         free_cpumask_var(tracing_cpumask_new);
2752
2753         return count;
2754
2755 err_unlock:
2756         free_cpumask_var(tracing_cpumask_new);
2757
2758         return err;
2759 }
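/*
 * Usage sketch (assuming the usual debugfs mount point and the
 * "tracing_cpumask" file this handler is wired to elsewhere in this file):
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * restricts tracing to cpus 0 and 1.  The loop above disables ring buffer
 * recording on cpus whose bit was cleared and re-enables it on cpus whose
 * bit was newly set.
 */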
2760
2761 static const struct file_operations tracing_cpumask_fops = {
2762         .open           = tracing_open_generic,
2763         .read           = tracing_cpumask_read,
2764         .write          = tracing_cpumask_write,
2765         .llseek         = generic_file_llseek,
2766 };
2767
2768 static int tracing_trace_options_show(struct seq_file *m, void *v)
2769 {
2770         struct tracer_opt *trace_opts;
2771         u32 tracer_flags;
2772         int i;
2773
2774         mutex_lock(&trace_types_lock);
2775         tracer_flags = current_trace->flags->val;
2776         trace_opts = current_trace->flags->opts;
2777
2778         for (i = 0; trace_options[i]; i++) {
2779                 if (trace_flags & (1 << i))
2780                         seq_printf(m, "%s\n", trace_options[i]);
2781                 else
2782                         seq_printf(m, "no%s\n", trace_options[i]);
2783         }
2784
2785         for (i = 0; trace_opts[i].name; i++) {
2786                 if (tracer_flags & trace_opts[i].bit)
2787                         seq_printf(m, "%s\n", trace_opts[i].name);
2788                 else
2789                         seq_printf(m, "no%s\n", trace_opts[i].name);
2790         }
2791         mutex_unlock(&trace_types_lock);
2792
2793         return 0;
2794 }
2795
2796 static int __set_tracer_option(struct tracer *trace,
2797                                struct tracer_flags *tracer_flags,
2798                                struct tracer_opt *opts, int neg)
2799 {
2800         int ret;
2801
2802         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2803         if (ret)
2804                 return ret;
2805
2806         if (neg)
2807                 tracer_flags->val &= ~opts->bit;
2808         else
2809                 tracer_flags->val |= opts->bit;
2810         return 0;
2811 }
2812
2813 /* Try to assign a tracer specific option */
2814 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2815 {
2816         struct tracer_flags *tracer_flags = trace->flags;
2817         struct tracer_opt *opts = NULL;
2818         int i;
2819
2820         for (i = 0; tracer_flags->opts[i].name; i++) {
2821                 opts = &tracer_flags->opts[i];
2822
2823                 if (strcmp(cmp, opts->name) == 0)
2824                         return __set_tracer_option(trace, trace->flags,
2825                                                    opts, neg);
2826         }
2827
2828         return -EINVAL;
2829 }
2830
2831 static void set_tracer_flags(unsigned int mask, int enabled)
2832 {
2833         /* do nothing if flag is already set */
2834         if (!!(trace_flags & mask) == !!enabled)
2835                 return;
2836
2837         if (enabled)
2838                 trace_flags |= mask;
2839         else
2840                 trace_flags &= ~mask;
2841
2842         if (mask == TRACE_ITER_RECORD_CMD)
2843                 trace_event_enable_cmd_record(enabled);
2844
2845         if (mask == TRACE_ITER_OVERWRITE)
2846                 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2847
2848         if (mask == TRACE_ITER_PRINTK)
2849                 trace_printk_start_stop_comm(enabled);
2850 }
2851
2852 static ssize_t
2853 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2854                         size_t cnt, loff_t *ppos)
2855 {
2856         char buf[64];
2857         char *cmp;
2858         int neg = 0;
2859         int ret;
2860         int i;
2861
2862         if (cnt >= sizeof(buf))
2863                 return -EINVAL;
2864
2865         if (copy_from_user(&buf, ubuf, cnt))
2866                 return -EFAULT;
2867
2868         buf[cnt] = 0;
2869         cmp = strstrip(buf);
2870
2871         if (strncmp(cmp, "no", 2) == 0) {
2872                 neg = 1;
2873                 cmp += 2;
2874         }
2875
2876         for (i = 0; trace_options[i]; i++) {
2877                 if (strcmp(cmp, trace_options[i]) == 0) {
2878                         set_tracer_flags(1 << i, !neg);
2879                         break;
2880                 }
2881         }
2882
2883         /* If no option could be set, test the specific tracer options */
2884         if (!trace_options[i]) {
2885                 mutex_lock(&trace_types_lock);
2886                 ret = set_tracer_option(current_trace, cmp, neg);
2887                 mutex_unlock(&trace_types_lock);
2888                 if (ret)
2889                         return ret;
2890         }
2891
2892         *ppos += cnt;
2893
2894         return cnt;
2895 }
2896
2897 static int tracing_trace_options_open(struct inode *inode, struct file *file)
2898 {
2899         if (tracing_disabled)
2900                 return -ENODEV;
2901         return single_open(file, tracing_trace_options_show, NULL);
2902 }
2903
2904 static const struct file_operations tracing_iter_fops = {
2905         .open           = tracing_trace_options_open,
2906         .read           = seq_read,
2907         .llseek         = seq_lseek,
2908         .release        = single_release,
2909         .write          = tracing_trace_options_write,
2910 };
2911
2912 static const char readme_msg[] =
2913         "tracing mini-HOWTO:\n\n"
2914         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2915         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2916         "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2917         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2918         "nop\n"
2919         "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2920         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2921         "wakeup\n"
2922         "# cat /sys/kernel/debug/tracing/trace_options\n"
2923         "noprint-parent nosym-offset nosym-addr noverbose\n"
2924         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2925         "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2926         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2927         "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2928 ;
2929
2930 static ssize_t
2931 tracing_readme_read(struct file *filp, char __user *ubuf,
2932                        size_t cnt, loff_t *ppos)
2933 {
2934         return simple_read_from_buffer(ubuf, cnt, ppos,
2935                                         readme_msg, strlen(readme_msg));
2936 }
2937
2938 static const struct file_operations tracing_readme_fops = {
2939         .open           = tracing_open_generic,
2940         .read           = tracing_readme_read,
2941         .llseek         = generic_file_llseek,
2942 };
2943
2944 static ssize_t
2945 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2946                                 size_t cnt, loff_t *ppos)
2947 {
2948         char *buf_comm;
2949         char *file_buf;
2950         char *buf;
2951         int len = 0;
2952         int pid;
2953         int i;
2954
2955         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2956         if (!file_buf)
2957                 return -ENOMEM;
2958
2959         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2960         if (!buf_comm) {
2961                 kfree(file_buf);
2962                 return -ENOMEM;
2963         }
2964
2965         buf = file_buf;
2966
2967         for (i = 0; i < SAVED_CMDLINES; i++) {
2968                 int r;
2969
2970                 pid = map_cmdline_to_pid[i];
2971                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2972                         continue;
2973
2974                 trace_find_cmdline(pid, buf_comm);
2975                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2976                 buf += r;
2977                 len += r;
2978         }
2979
2980         len = simple_read_from_buffer(ubuf, cnt, ppos,
2981                                       file_buf, len);
2982
2983         kfree(file_buf);
2984         kfree(buf_comm);
2985
2986         return len;
2987 }
2988
2989 static const struct file_operations tracing_saved_cmdlines_fops = {
2990     .open       = tracing_open_generic,
2991     .read       = tracing_saved_cmdlines_read,
2992     .llseek     = generic_file_llseek,
2993 };
2994
2995 static ssize_t
2996 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2997                   size_t cnt, loff_t *ppos)
2998 {
2999         char buf[64];
3000         int r;
3001
3002         r = sprintf(buf, "%u\n", tracer_enabled);
3003         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3004 }
3005
3006 static ssize_t
3007 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
3008                    size_t cnt, loff_t *ppos)
3009 {
3010         struct trace_array *tr = filp->private_data;
3011         unsigned long val;
3012         int ret;
3013
3014         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3015         if (ret)
3016                 return ret;
3017
3018         val = !!val;
3019
3020         mutex_lock(&trace_types_lock);
3021         if (tracer_enabled ^ val) {
3022
3023                 /* Only need to warn if this is used to change the state */
3024                 WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
3025
3026                 if (val) {
3027                         tracer_enabled = 1;
3028                         if (current_trace->start)
3029                                 current_trace->start(tr);
3030                         tracing_start();
3031                 } else {
3032                         tracer_enabled = 0;
3033                         tracing_stop();
3034                         if (current_trace->stop)
3035                                 current_trace->stop(tr);
3036                 }
3037         }
3038         mutex_unlock(&trace_types_lock);
3039
3040         *ppos += cnt;
3041
3042         return cnt;
3043 }
3044
3045 static ssize_t
3046 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3047                        size_t cnt, loff_t *ppos)
3048 {
3049         char buf[MAX_TRACER_SIZE+2];
3050         int r;
3051
3052         mutex_lock(&trace_types_lock);
3053         if (current_trace)
3054                 r = sprintf(buf, "%s\n", current_trace->name);
3055         else
3056                 r = sprintf(buf, "\n");
3057         mutex_unlock(&trace_types_lock);
3058
3059         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3060 }
3061
3062 int tracer_init(struct tracer *t, struct trace_array *tr)
3063 {
3064         tracing_reset_online_cpus(tr);
3065         return t->init(tr);
3066 }
3067
3068 static void set_buffer_entries(struct trace_array *tr, unsigned long val)
3069 {
3070         int cpu;
3071         for_each_tracing_cpu(cpu)
3072                 tr->data[cpu]->entries = val;
3073 }
3074
3075 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
3076 {
3077         int ret;
3078
3079         /*
3080          * If kernel or user changes the size of the ring buffer
3081          * we use the size that was given, and we can forget about
3082          * expanding it later.
3083          */
3084         ring_buffer_expanded = 1;
3085
3086         /* May be called before buffers are initialized */
3087         if (!global_trace.buffer)
3088                 return 0;
3089
3090         ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3091         if (ret < 0)
3092                 return ret;
3093
3094         if (!current_trace->use_max_tr)
3095                 goto out;
3096
3097         ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3098         if (ret < 0) {
3099                 int r = 0;
3100
3101                 if (cpu == RING_BUFFER_ALL_CPUS) {
3102                         int i;
3103                         for_each_tracing_cpu(i) {
3104                                 r = ring_buffer_resize(global_trace.buffer,
3105                                                 global_trace.data[i]->entries,
3106                                                 i);
3107                                 if (r < 0)
3108                                         break;
3109                         }
3110                 } else {
3111                         r = ring_buffer_resize(global_trace.buffer,
3112                                                 global_trace.data[cpu]->entries,
3113                                                 cpu);
3114                 }
3115
3116                 if (r < 0) {
3117                         /*
3118                          * AARGH! We are left with a different
3119                          * size max buffer!!!!
3120                          * The max buffer is our "snapshot" buffer.
3121                          * When a tracer needs a snapshot (one of the
3122                          * latency tracers), it swaps the max buffer
3123                          * with the saved snapshot. We succeeded in
3124                          * updating the size of the main buffer, but failed
3125                          * to update the size of the max buffer. Then when
3126                          * we tried to reset the main buffer to its original
3127                          * size, we failed there too. This is very unlikely
3128                          * to happen, but if it does, warn and kill all
3129                          * tracing.
3130                          */
3131                         WARN_ON(1);
3132                         tracing_disabled = 1;
3133                 }
3134                 return ret;
3135         }
3136
3137         if (cpu == RING_BUFFER_ALL_CPUS)
3138                 set_buffer_entries(&max_tr, size);
3139         else
3140                 max_tr.data[cpu]->entries = size;
3141
3142  out:
3143         if (cpu == RING_BUFFER_ALL_CPUS)
3144                 set_buffer_entries(&global_trace, size);
3145         else
3146                 global_trace.data[cpu]->entries = size;
3147
3148         return ret;
3149 }
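/*
 * Resize order above: the main buffer is resized first; if the current
 * tracer uses max_tr, that buffer is resized to match.  If the max_tr
 * resize fails, the main buffer is put back to its previous per-cpu sizes,
 * and only if that restore also fails is tracing disabled entirely.
 */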
3150
3151 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3152 {
3153         int ret = size;
3154
3155         mutex_lock(&trace_types_lock);
3156
3157         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3158                 /* make sure this cpu is enabled in the mask */
3159                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3160                         ret = -EINVAL;
3161                         goto out;
3162                 }
3163         }
3164
3165         ret = __tracing_resize_ring_buffer(size, cpu_id);
3166         if (ret < 0)
3167                 ret = -ENOMEM;
3168
3169 out:
3170         mutex_unlock(&trace_types_lock);
3171
3172         return ret;
3173 }
3174
3175
3176 /**
3177  * tracing_update_buffers - used by tracing facility to expand ring buffers
3178  *
3179  * To save memory when tracing is never used on a system that has it
3180  * configured in, the ring buffers are set to a minimum size. But once
3181  * a user starts to use the tracing facility, they need to grow
3182  * to their default size.
3183  *
3184  * This function is to be called when a tracer is about to be used.
3185  */
3186 int tracing_update_buffers(void)
3187 {
3188         int ret = 0;
3189
3190         mutex_lock(&trace_types_lock);
3191         if (!ring_buffer_expanded)
3192                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3193                                                 RING_BUFFER_ALL_CPUS);
3194         mutex_unlock(&trace_types_lock);
3195
3196         return ret;
3197 }
3198
3199 struct trace_option_dentry;
3200
3201 static struct trace_option_dentry *
3202 create_trace_option_files(struct tracer *tracer);
3203
3204 static void
3205 destroy_trace_option_files(struct trace_option_dentry *topts);
3206
3207 static int tracing_set_tracer(const char *buf)
3208 {
3209         static struct trace_option_dentry *topts;
3210         struct trace_array *tr = &global_trace;
3211         struct tracer *t;
3212         int ret = 0;
3213
3214         mutex_lock(&trace_types_lock);
3215
3216         if (!ring_buffer_expanded) {
3217                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3218                                                 RING_BUFFER_ALL_CPUS);
3219                 if (ret < 0)
3220                         goto out;
3221                 ret = 0;
3222         }
3223
3224         for (t = trace_types; t; t = t->next) {
3225                 if (strcmp(t->name, buf) == 0)
3226                         break;
3227         }
3228         if (!t) {
3229                 ret = -EINVAL;
3230                 goto out;
3231         }
3232         if (t == current_trace)
3233                 goto out;
3234
3235         trace_branch_disable();
3236         if (current_trace && current_trace->reset)
3237                 current_trace->reset(tr);
3238         if (current_trace && current_trace->use_max_tr) {
3239                 /*
3240                  * We don't free the ring buffer; instead, we resize it because
3241                  * the max_tr ring buffer has some state (e.g. ring->clock) and
3242                  * we want to preserve it.
3243                  */
3244                 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3245                 set_buffer_entries(&max_tr, 1);
3246         }
3247         destroy_trace_option_files(topts);
3248
3249         current_trace = &nop_trace;
3250
3251         topts = create_trace_option_files(t);
3252         if (t->use_max_tr) {
3253                 int cpu;
3254                 /* we need to make per cpu buffer sizes equivalent */
3255                 for_each_tracing_cpu(cpu) {
3256                         ret = ring_buffer_resize(max_tr.buffer,
3257                                                 global_trace.data[cpu]->entries,
3258                                                 cpu);
3259                         if (ret < 0)
3260                                 goto out;
3261                         max_tr.data[cpu]->entries =
3262                                         global_trace.data[cpu]->entries;
3263                 }
3264         }
3265
3266         if (t->init) {
3267                 ret = tracer_init(t, tr);
3268                 if (ret)
3269                         goto out;
3270         }
3271
3272         current_trace = t;
3273         trace_branch_enable(tr);
3274  out:
3275         mutex_unlock(&trace_types_lock);
3276
3277         return ret;
3278 }
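/*
 * Switching tracers (e.g. "echo wakeup > current_tracer", as in the readme
 * text above) runs through tracing_set_tracer(): branch tracing is paused,
 * the old tracer's reset() is called, max_tr is shrunk if the old tracer
 * used it, the per-tracer option files are swapped, and the new tracer's
 * init() runs before it becomes current_trace.
 */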
3279
3280 static ssize_t
3281 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3282                         size_t cnt, loff_t *ppos)
3283 {
3284         char buf[MAX_TRACER_SIZE+1];
3285         int i;
3286         size_t ret;
3287         int err;
3288
3289         ret = cnt;
3290
3291         if (cnt > MAX_TRACER_SIZE)
3292                 cnt = MAX_TRACER_SIZE;
3293
3294         if (copy_from_user(&buf, ubuf, cnt))
3295                 return -EFAULT;
3296
3297         buf[cnt] = 0;
3298
3299         /* strip trailing whitespace. */
3300         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3301                 buf[i] = 0;
3302
3303         err = tracing_set_tracer(buf);
3304         if (err)
3305                 return err;
3306
3307         *ppos += ret;
3308
3309         return ret;
3310 }
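
/*
 * A sketch of how the write handler above is typically driven from
 * userspace (assuming debugfs is mounted at /sys/kernel/debug): the
 * tracer is selected by writing its name into the "current_tracer"
 * file, e.g.:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer   (if built in)
 *   echo nop > /sys/kernel/debug/tracing/current_tracer
 */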
3311
3312 static ssize_t
3313 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3314                      size_t cnt, loff_t *ppos)
3315 {
3316         unsigned long *ptr = filp->private_data;
3317         char buf[64];
3318         int r;
3319
3320         r = snprintf(buf, sizeof(buf), "%ld\n",
3321                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3322         if (r > sizeof(buf))
3323                 r = sizeof(buf);
3324         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3325 }
3326
3327 static ssize_t
3328 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3329                       size_t cnt, loff_t *ppos)
3330 {
3331         unsigned long *ptr = filp->private_data;
3332         unsigned long val;
3333         int ret;
3334
3335         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3336         if (ret)
3337                 return ret;
3338
3339         *ptr = val * 1000;
3340
3341         return cnt;
3342 }
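
/*
 * The files served by the two handlers above (tracing_thresh and, with
 * CONFIG_TRACER_MAX_TRACE, tracing_max_latency) exchange values in
 * microseconds with userspace, while the kernel stores them in
 * nanoseconds -- hence the "val * 1000" above and nsecs_to_usecs() in
 * the read path. A sketch of typical usage:
 *
 *   echo 100 > /sys/kernel/debug/tracing/tracing_thresh   # 100 usecs
 */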
3343
3344 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3345 {
3346         long cpu_file = (long) inode->i_private;
3347         struct trace_iterator *iter;
3348         int ret = 0;
3349
3350         if (tracing_disabled)
3351                 return -ENODEV;
3352
3353         mutex_lock(&trace_types_lock);
3354
3355         /* create a buffer to store the information to pass to userspace */
3356         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3357         if (!iter) {
3358                 ret = -ENOMEM;
3359                 goto out;
3360         }
3361
3362         /*
3363          * We make a copy of the current tracer to avoid concurrent
3364          * changes to it while we are reading.
3365          */
3366         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3367         if (!iter->trace) {
3368                 ret = -ENOMEM;
3369                 goto fail;
3370         }
3371         if (current_trace)
3372                 *iter->trace = *current_trace;
3373
3374         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3375                 ret = -ENOMEM;
3376                 goto fail;
3377         }
3378
3379         /* trace pipe does not show start of buffer */
3380         cpumask_setall(iter->started);
3381
3382         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3383                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3384
3385         iter->cpu_file = cpu_file;
3386         iter->tr = &global_trace;
3387         mutex_init(&iter->mutex);
3388         filp->private_data = iter;
3389
3390         if (iter->trace->pipe_open)
3391                 iter->trace->pipe_open(iter);
3392
3393         nonseekable_open(inode, filp);
3394 out:
3395         mutex_unlock(&trace_types_lock);
3396         return ret;
3397
3398 fail:
3399         kfree(iter->trace);
3400         kfree(iter);
3401         mutex_unlock(&trace_types_lock);
3402         return ret;
3403 }
3404
3405 static int tracing_release_pipe(struct inode *inode, struct file *file)
3406 {
3407         struct trace_iterator *iter = file->private_data;
3408
3409         mutex_lock(&trace_types_lock);
3410
3411         if (iter->trace->pipe_close)
3412                 iter->trace->pipe_close(iter);
3413
3414         mutex_unlock(&trace_types_lock);
3415
3416         free_cpumask_var(iter->started);
3417         mutex_destroy(&iter->mutex);
3418         kfree(iter->trace);
3419         kfree(iter);
3420
3421         return 0;
3422 }
3423
3424 static unsigned int
3425 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3426 {
3427         struct trace_iterator *iter = filp->private_data;
3428
3429         if (trace_flags & TRACE_ITER_BLOCK) {
3430                 /*
3431                  * Always select as readable when in blocking mode
3432                  */
3433                 return POLLIN | POLLRDNORM;
3434         } else {
3435                 if (!trace_empty(iter))
3436                         return POLLIN | POLLRDNORM;
3437                 poll_wait(filp, &trace_wait, poll_table);
3438                 if (!trace_empty(iter))
3439                         return POLLIN | POLLRDNORM;
3440
3441                 return 0;
3442         }
3443 }
3444
3445
3446 void default_wait_pipe(struct trace_iterator *iter)
3447 {
3448         DEFINE_WAIT(wait);
3449
3450         prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3451
3452         if (trace_empty(iter))
3453                 schedule();
3454
3455         finish_wait(&trace_wait, &wait);
3456 }
3457
3458 /*
3459  * This is a makeshift waitqueue.
3460  * A tracer might use this callback in some rare cases:
3461  *
3462  *  1) the current tracer might hold the runqueue lock when it wakes up
3463  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3464  *  2) the function tracers trace all functions, and we don't want
3465  *     the overhead of calling wake_up and friends
3466  *     (and of tracing them too)
3467  *
3468  * Anyway, this is a very primitive wakeup.
3469  */
3470 void poll_wait_pipe(struct trace_iterator *iter)
3471 {
3472         set_current_state(TASK_INTERRUPTIBLE);
3473         /* sleep for 100 msecs, and try again. */
3474         schedule_timeout(HZ / 10);
3475 }
3476
3477 /* Must be called with trace_types_lock mutex held. */
3478 static int tracing_wait_pipe(struct file *filp)
3479 {
3480         struct trace_iterator *iter = filp->private_data;
3481
3482         while (trace_empty(iter)) {
3483
3484                 if ((filp->f_flags & O_NONBLOCK)) {
3485                         return -EAGAIN;
3486                 }
3487
3488                 mutex_unlock(&iter->mutex);
3489
3490                 iter->trace->wait_pipe(iter);
3491
3492                 mutex_lock(&iter->mutex);
3493
3494                 if (signal_pending(current))
3495                         return -EINTR;
3496
3497                 /*
3498                  * We stop blocking only once we have read something and
3499                  * tracing has been disabled. If tracing is disabled but
3500                  * nothing has been read yet, we keep blocking. This lets a
3501                  * user cat this file and then enable tracing. Once we have
3502                  * read something, we give an EOF when tracing is disabled again.
3503                  *
3504                  * iter->pos will be 0 if we haven't read anything.
3505                  */
3506                 if (!tracer_enabled && iter->pos)
3507                         break;
3508         }
3509
3510         return 1;
3511 }
3512
3513 /*
3514  * Consumer reader.
3515  */
3516 static ssize_t
3517 tracing_read_pipe(struct file *filp, char __user *ubuf,
3518                   size_t cnt, loff_t *ppos)
3519 {
3520         struct trace_iterator *iter = filp->private_data;
3521         static struct tracer *old_tracer;
3522         ssize_t sret;
3523
3524         /* return any leftover data */
3525         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3526         if (sret != -EBUSY)
3527                 return sret;
3528
3529         trace_seq_init(&iter->seq);
3530
3531         /* copy the tracer to avoid using a global lock all around */
3532         mutex_lock(&trace_types_lock);
3533         if (unlikely(old_tracer != current_trace && current_trace)) {
3534                 old_tracer = current_trace;
3535                 *iter->trace = *current_trace;
3536         }
3537         mutex_unlock(&trace_types_lock);
3538
3539         /*
3540          * Avoid more than one consumer on a single file descriptor.
3541          * This is just a matter of trace coherency; the ring buffer
3542          * itself is protected.
3543          */
3544         mutex_lock(&iter->mutex);
3545         if (iter->trace->read) {
3546                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3547                 if (sret)
3548                         goto out;
3549         }
3550
3551 waitagain:
3552         sret = tracing_wait_pipe(filp);
3553         if (sret <= 0)
3554                 goto out;
3555
3556         /* stop when tracing is finished */
3557         if (trace_empty(iter)) {
3558                 sret = 0;
3559                 goto out;
3560         }
3561
3562         if (cnt >= PAGE_SIZE)
3563                 cnt = PAGE_SIZE - 1;
3564
3565         /* reset all but tr, trace, and overruns */
3566         memset(&iter->seq, 0,
3567                sizeof(struct trace_iterator) -
3568                offsetof(struct trace_iterator, seq));
3569         iter->pos = -1;
3570
3571         trace_event_read_lock();
3572         trace_access_lock(iter->cpu_file);
3573         while (trace_find_next_entry_inc(iter) != NULL) {
3574                 enum print_line_t ret;
3575                 int len = iter->seq.len;
3576
3577                 ret = print_trace_line(iter);
3578                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3579                         /* don't print partial lines */
3580                         iter->seq.len = len;
3581                         break;
3582                 }
3583                 if (ret != TRACE_TYPE_NO_CONSUME)
3584                         trace_consume(iter);
3585
3586                 if (iter->seq.len >= cnt)
3587                         break;
3588
3589                 /*
3590                  * Setting the full flag means we reached the trace_seq buffer
3591                  * size and should have left via the partial output condition
3592                  * above: one of the trace_seq_* functions is not used properly.
3593                  */
3594                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3595                           iter->ent->type);
3596         }
3597         trace_access_unlock(iter->cpu_file);
3598         trace_event_read_unlock();
3599
3600         /* Now copy what we have to the user */
3601         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3602         if (iter->seq.readpos >= iter->seq.len)
3603                 trace_seq_init(&iter->seq);
3604
3605         /*
3606          * If there was nothing to send to user, in spite of consuming trace
3607          * entries, go back to wait for more entries.
3608          */
3609         if (sret == -EBUSY)
3610                 goto waitagain;
3611
3612 out:
3613         mutex_unlock(&iter->mutex);
3614
3615         return sret;
3616 }
3617
3618 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3619                                      struct pipe_buffer *buf)
3620 {
3621         __free_page(buf->page);
3622 }
3623
3624 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3625                                      unsigned int idx)
3626 {
3627         __free_page(spd->pages[idx]);
3628 }
3629
3630 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3631         .can_merge              = 0,
3632         .map                    = generic_pipe_buf_map,
3633         .unmap                  = generic_pipe_buf_unmap,
3634         .confirm                = generic_pipe_buf_confirm,
3635         .release                = tracing_pipe_buf_release,
3636         .steal                  = generic_pipe_buf_steal,
3637         .get                    = generic_pipe_buf_get,
3638 };
3639
3640 static size_t
3641 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3642 {
3643         size_t count;
3644         int ret;
3645
3646         /* Seq buffer is page-sized, exactly what we need. */
3647         for (;;) {
3648                 count = iter->seq.len;
3649                 ret = print_trace_line(iter);
3650                 count = iter->seq.len - count;
3651                 if (rem < count) {
3652                         rem = 0;
3653                         iter->seq.len -= count;
3654                         break;
3655                 }
3656                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3657                         iter->seq.len -= count;
3658                         break;
3659                 }
3660
3661                 if (ret != TRACE_TYPE_NO_CONSUME)
3662                         trace_consume(iter);
3663                 rem -= count;
3664                 if (!trace_find_next_entry_inc(iter))   {
3665                         rem = 0;
3666                         iter->ent = NULL;
3667                         break;
3668                 }
3669         }
3670
3671         return rem;
3672 }
3673
3674 static ssize_t tracing_splice_read_pipe(struct file *filp,
3675                                         loff_t *ppos,
3676                                         struct pipe_inode_info *pipe,
3677                                         size_t len,
3678                                         unsigned int flags)
3679 {
3680         struct page *pages_def[PIPE_DEF_BUFFERS];
3681         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3682         struct trace_iterator *iter = filp->private_data;
3683         struct splice_pipe_desc spd = {
3684                 .pages          = pages_def,
3685                 .partial        = partial_def,
3686                 .nr_pages       = 0, /* This gets updated below. */
3687                 .nr_pages_max   = PIPE_DEF_BUFFERS,
3688                 .flags          = flags,
3689                 .ops            = &tracing_pipe_buf_ops,
3690                 .spd_release    = tracing_spd_release_pipe,
3691         };
3692         static struct tracer *old_tracer;
3693         ssize_t ret;
3694         size_t rem;
3695         unsigned int i;
3696
3697         if (splice_grow_spd(pipe, &spd))
3698                 return -ENOMEM;
3699
3700         /* copy the tracer to avoid using a global lock all around */
3701         mutex_lock(&trace_types_lock);
3702         if (unlikely(old_tracer != current_trace && current_trace)) {
3703                 old_tracer = current_trace;
3704                 *iter->trace = *current_trace;
3705         }
3706         mutex_unlock(&trace_types_lock);
3707
3708         mutex_lock(&iter->mutex);
3709
3710         if (iter->trace->splice_read) {
3711                 ret = iter->trace->splice_read(iter, filp,
3712                                                ppos, pipe, len, flags);
3713                 if (ret)
3714                         goto out_err;
3715         }
3716
3717         ret = tracing_wait_pipe(filp);
3718         if (ret <= 0)
3719                 goto out_err;
3720
3721         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3722                 ret = -EFAULT;
3723                 goto out_err;
3724         }
3725
3726         trace_event_read_lock();
3727         trace_access_lock(iter->cpu_file);
3728
3729         /* Fill as many pages as possible. */
3730         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3731                 spd.pages[i] = alloc_page(GFP_KERNEL);
3732                 if (!spd.pages[i])
3733                         break;
3734
3735                 rem = tracing_fill_pipe_page(rem, iter);
3736
3737                 /* Copy the data into the page, so we can start over. */
3738                 ret = trace_seq_to_buffer(&iter->seq,
3739                                           page_address(spd.pages[i]),
3740                                           iter->seq.len);
3741                 if (ret < 0) {
3742                         __free_page(spd.pages[i]);
3743                         break;
3744                 }
3745                 spd.partial[i].offset = 0;
3746                 spd.partial[i].len = iter->seq.len;
3747
3748                 trace_seq_init(&iter->seq);
3749         }
3750
3751         trace_access_unlock(iter->cpu_file);
3752         trace_event_read_unlock();
3753         mutex_unlock(&iter->mutex);
3754
3755         spd.nr_pages = i;
3756
3757         ret = splice_to_pipe(pipe, &spd);
3758 out:
3759         splice_shrink_spd(&spd);
3760         return ret;
3761
3762 out_err:
3763         mutex_unlock(&iter->mutex);
3764         goto out;
3765 }
3766
3767 struct ftrace_entries_info {
3768         struct trace_array      *tr;
3769         int                     cpu;
3770 };
3771
3772 static int tracing_entries_open(struct inode *inode, struct file *filp)
3773 {
3774         struct ftrace_entries_info *info;
3775
3776         if (tracing_disabled)
3777                 return -ENODEV;
3778
3779         info = kzalloc(sizeof(*info), GFP_KERNEL);
3780         if (!info)
3781                 return -ENOMEM;
3782
3783         info->tr = &global_trace;
3784         info->cpu = (unsigned long)inode->i_private;
3785
3786         filp->private_data = info;
3787
3788         return 0;
3789 }
3790
3791 static ssize_t
3792 tracing_entries_read(struct file *filp, char __user *ubuf,
3793                      size_t cnt, loff_t *ppos)
3794 {
3795         struct ftrace_entries_info *info = filp->private_data;
3796         struct trace_array *tr = info->tr;
3797         char buf[64];
3798         int r = 0;
3799         ssize_t ret;
3800
3801         mutex_lock(&trace_types_lock);
3802
3803         if (info->cpu == RING_BUFFER_ALL_CPUS) {
3804                 int cpu, buf_size_same;
3805                 unsigned long size;
3806
3807                 size = 0;
3808                 buf_size_same = 1;
3809                 /* check if all cpu sizes are same */
3810                 for_each_tracing_cpu(cpu) {
3811                         /* fill in the size from first enabled cpu */
3812                         if (size == 0)
3813                                 size = tr->data[cpu]->entries;
3814                         if (size != tr->data[cpu]->entries) {
3815                                 buf_size_same = 0;
3816                                 break;
3817                         }
3818                 }
3819
3820                 if (buf_size_same) {
3821                         if (!ring_buffer_expanded)
3822                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3823                                             size >> 10,
3824                                             trace_buf_size >> 10);
3825                         else
3826                                 r = sprintf(buf, "%lu\n", size >> 10);
3827                 } else
3828                         r = sprintf(buf, "X\n");
3829         } else
3830                 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3831
3832         mutex_unlock(&trace_types_lock);
3833
3834         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3835         return ret;
3836 }
3837
3838 static ssize_t
3839 tracing_entries_write(struct file *filp, const char __user *ubuf,
3840                       size_t cnt, loff_t *ppos)
3841 {
3842         struct ftrace_entries_info *info = filp->private_data;
3843         unsigned long val;
3844         int ret;
3845
3846         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3847         if (ret)
3848                 return ret;
3849
3850         /* must have at least 1 entry */
3851         if (!val)
3852                 return -EINVAL;
3853
3854         /* value is in KB */
3855         val <<= 10;
3856
3857         ret = tracing_resize_ring_buffer(val, info->cpu);
3858         if (ret < 0)
3859                 return ret;
3860
3861         *ppos += cnt;
3862
3863         return cnt;
3864 }
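
/*
 * The read/write handlers above back the "buffer_size_kb" files (one
 * per cpu and one for all cpus). Values are exchanged in kilobytes,
 * hence the ">> 10" and "<< 10" shifts. A sketch of typical usage:
 *
 *   echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *   cat /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */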
3865
3866 static int
3867 tracing_entries_release(struct inode *inode, struct file *filp)
3868 {
3869         struct ftrace_entries_info *info = filp->private_data;
3870
3871         kfree(info);
3872
3873         return 0;
3874 }
3875
3876 static ssize_t
3877 tracing_total_entries_read(struct file *filp, char __user *ubuf,
3878                                 size_t cnt, loff_t *ppos)
3879 {
3880         struct trace_array *tr = filp->private_data;
3881         char buf[64];
3882         int r, cpu;
3883         unsigned long size = 0, expanded_size = 0;
3884
3885         mutex_lock(&trace_types_lock);
3886         for_each_tracing_cpu(cpu) {
3887                 size += tr->data[cpu]->entries >> 10;
3888                 if (!ring_buffer_expanded)
3889                         expanded_size += trace_buf_size >> 10;
3890         }
3891         if (ring_buffer_expanded)
3892                 r = sprintf(buf, "%lu\n", size);
3893         else
3894                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3895         mutex_unlock(&trace_types_lock);
3896
3897         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3898 }
3899
3900 static ssize_t
3901 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3902                           size_t cnt, loff_t *ppos)
3903 {
3904         /*
3905          * There is no need to read what the user has written; this function
3906          * exists just to make sure that there is no error when "echo" is used.
3907          */
3908
3909         *ppos += cnt;
3910
3911         return cnt;
3912 }
3913
3914 static int
3915 tracing_free_buffer_release(struct inode *inode, struct file *filp)
3916 {
3917         /* disable tracing? */
3918         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3919                 tracing_off();
3920         /* resize the ring buffer to 0 */
3921         tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3922
3923         return 0;
3924 }
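
/*
 * Together, the two handlers above implement the "free_buffer" file:
 * the write is deliberately a no-op so that "echo > free_buffer"
 * succeeds, and the actual work -- optionally turning tracing off and
 * shrinking the ring buffer to zero -- happens when the file is closed.
 */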
3925
3926 static ssize_t
3927 tracing_mark_write(struct file *filp, const char __user *ubuf,
3928                                         size_t cnt, loff_t *fpos)
3929 {
3930         unsigned long addr = (unsigned long)ubuf;
3931         struct ring_buffer_event *event;
3932         struct ring_buffer *buffer;
3933         struct print_entry *entry;
3934         unsigned long irq_flags;
3935         struct page *pages[2];
3936         void *map_page[2];
3937         int nr_pages = 1;
3938         ssize_t written;
3939         int offset;
3940         int size;
3941         int len;
3942         int ret;
3943         int i;
3944
3945         if (tracing_disabled)
3946                 return -EINVAL;
3947
3948         if (!(trace_flags & TRACE_ITER_MARKERS))
3949                 return -EINVAL;
3950
3951         if (cnt > TRACE_BUF_SIZE)
3952                 cnt = TRACE_BUF_SIZE;
3953
3954         /*
3955          * Userspace is injecting traces into the kernel trace buffer.
3956          * We want to be as non-intrusive as possible.
3957          * To do so, we do not want to allocate any special buffers
3958          * or take any locks, but instead write the userspace data
3959          * straight into the ring buffer.
3960          *
3961          * First we need to pin the userspace buffer into memory,
3962          * which it most likely already is, because userspace just
3963          * referenced it. But there's no guarantee of that. By using
3964          * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
3965          * can access the pages directly and write the data straight
3966          * into the ring buffer.
3967          */
3968         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3969
3970         /* check if we cross pages */
3971         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3972                 nr_pages = 2;
3973
3974         offset = addr & (PAGE_SIZE - 1);
3975         addr &= PAGE_MASK;
3976
3977         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3978         if (ret < nr_pages) {
3979                 while (--ret >= 0)
3980                         put_page(pages[ret]);
3981                 written = -EFAULT;
3982                 goto out;
3983         }
3984
3985         for (i = 0; i < nr_pages; i++)
3986                 map_page[i] = kmap_atomic(pages[i]);
3987
3988         local_save_flags(irq_flags);
3989         size = sizeof(*entry) + cnt + 2; /* possible \n added */
3990         buffer = global_trace.buffer;
3991         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3992                                           irq_flags, preempt_count());
3993         if (!event) {
3994                 /* Ring buffer disabled, return as if not open for write */
3995                 written = -EBADF;
3996                 goto out_unlock;
3997         }
3998
3999         entry = ring_buffer_event_data(event);
4000         entry->ip = _THIS_IP_;
4001
4002         if (nr_pages == 2) {
4003                 len = PAGE_SIZE - offset;
4004                 memcpy(&entry->buf, map_page[0] + offset, len);
4005                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4006         } else
4007                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4008
4009         if (entry->buf[cnt - 1] != '\n') {
4010                 entry->buf[cnt] = '\n';
4011                 entry->buf[cnt + 1] = '\0';
4012         } else
4013                 entry->buf[cnt] = '\0';
4014
4015         __buffer_unlock_commit(buffer, event);
4016
4017         written = cnt;
4018
4019         *fpos += written;
4020
4021  out_unlock:
4022         for (i = 0; i < nr_pages; i++) {
4023                 kunmap_atomic(map_page[i]);
4024                 put_page(pages[i]);
4025         }
4026  out:
4027         return written;
4028 }
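
/*
 * A sketch of how the trace_marker file implemented above is typically
 * used (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo "reached the interesting point" > /sys/kernel/debug/tracing/trace_marker
 *
 * The string is written straight into the ring buffer as a print entry,
 * which makes it easy to correlate application events with kernel events
 * in the trace output.
 */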
4029
4030 static int tracing_clock_show(struct seq_file *m, void *v)
4031 {
4032         int i;
4033
4034         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4035                 seq_printf(m,
4036                         "%s%s%s%s", i ? " " : "",
4037                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
4038                         i == trace_clock_id ? "]" : "");
4039         seq_putc(m, '\n');
4040
4041         return 0;
4042 }
4043
4044 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4045                                    size_t cnt, loff_t *fpos)
4046 {
4047         char buf[64];
4048         const char *clockstr;
4049         int i;
4050
4051         if (cnt >= sizeof(buf))
4052                 return -EINVAL;
4053
4054         if (copy_from_user(&buf, ubuf, cnt))
4055                 return -EFAULT;
4056
4057         buf[cnt] = 0;
4058
4059         clockstr = strstrip(buf);
4060
4061         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4062                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4063                         break;
4064         }
4065         if (i == ARRAY_SIZE(trace_clocks))
4066                 return -EINVAL;
4067
4068         trace_clock_id = i;
4069
4070         mutex_lock(&trace_types_lock);
4071
4072         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
4073         if (max_tr.buffer)
4074                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
4075
4076         mutex_unlock(&trace_types_lock);
4077
4078         *fpos += cnt;
4079
4080         return cnt;
4081 }
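
/*
 * Userspace side of the trace_clock file handled above, as a sketch:
 * reading lists the available clocks with the current one in brackets,
 * and writing a clock name switches both the global and max_tr buffers
 * to it, e.g.:
 *
 *   cat /sys/kernel/debug/tracing/trace_clock
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */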
4082
4083 static int tracing_clock_open(struct inode *inode, struct file *file)
4084 {
4085         if (tracing_disabled)
4086                 return -ENODEV;
4087         return single_open(file, tracing_clock_show, NULL);
4088 }
4089
4090 static const struct file_operations tracing_max_lat_fops = {
4091         .open           = tracing_open_generic,
4092         .read           = tracing_max_lat_read,
4093         .write          = tracing_max_lat_write,
4094         .llseek         = generic_file_llseek,
4095 };
4096
4097 static const struct file_operations tracing_ctrl_fops = {
4098         .open           = tracing_open_generic,
4099         .read           = tracing_ctrl_read,
4100         .write          = tracing_ctrl_write,
4101         .llseek         = generic_file_llseek,
4102 };
4103
4104 static const struct file_operations set_tracer_fops = {
4105         .open           = tracing_open_generic,
4106         .read           = tracing_set_trace_read,
4107         .write          = tracing_set_trace_write,
4108         .llseek         = generic_file_llseek,
4109 };
4110
4111 static const struct file_operations tracing_pipe_fops = {
4112         .open           = tracing_open_pipe,
4113         .poll           = tracing_poll_pipe,
4114         .read           = tracing_read_pipe,
4115         .splice_read    = tracing_splice_read_pipe,
4116         .release        = tracing_release_pipe,
4117         .llseek         = no_llseek,
4118 };
4119
4120 static const struct file_operations tracing_entries_fops = {
4121         .open           = tracing_entries_open,
4122         .read           = tracing_entries_read,
4123         .write          = tracing_entries_write,
4124         .release        = tracing_entries_release,
4125         .llseek         = generic_file_llseek,
4126 };
4127
4128 static const struct file_operations tracing_total_entries_fops = {
4129         .open           = tracing_open_generic,
4130         .read           = tracing_total_entries_read,
4131         .llseek         = generic_file_llseek,
4132 };
4133
4134 static const struct file_operations tracing_free_buffer_fops = {
4135         .write          = tracing_free_buffer_write,
4136         .release        = tracing_free_buffer_release,
4137 };
4138
4139 static const struct file_operations tracing_mark_fops = {
4140         .open           = tracing_open_generic,
4141         .write          = tracing_mark_write,
4142         .llseek         = generic_file_llseek,
4143 };
4144
4145 static const struct file_operations trace_clock_fops = {
4146         .open           = tracing_clock_open,
4147         .read           = seq_read,
4148         .llseek         = seq_lseek,
4149         .release        = single_release,
4150         .write          = tracing_clock_write,
4151 };
4152
4153 struct ftrace_buffer_info {
4154         struct trace_array      *tr;
4155         void                    *spare;
4156         int                     cpu;
4157         unsigned int            read;
4158 };
4159
4160 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4161 {
4162         int cpu = (int)(long)inode->i_private;
4163         struct ftrace_buffer_info *info;
4164
4165         if (tracing_disabled)
4166                 return -ENODEV;
4167
4168         info = kzalloc(sizeof(*info), GFP_KERNEL);
4169         if (!info)
4170                 return -ENOMEM;
4171
4172         info->tr        = &global_trace;
4173         info->cpu       = cpu;
4174         info->spare     = NULL;
4175         /* Force reading ring buffer for first read */
4176         info->read      = (unsigned int)-1;
4177
4178         filp->private_data = info;
4179
4180         return nonseekable_open(inode, filp);
4181 }
4182
4183 static ssize_t
4184 tracing_buffers_read(struct file *filp, char __user *ubuf,
4185                      size_t count, loff_t *ppos)
4186 {
4187         struct ftrace_buffer_info *info = filp->private_data;
4188         ssize_t ret;
4189         size_t size;
4190
4191         if (!count)
4192                 return 0;
4193
4194         if (!info->spare)
4195                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4196         if (!info->spare)
4197                 return -ENOMEM;
4198
4199         /* Do we have previous read data to read? */
4200         if (info->read < PAGE_SIZE)
4201                 goto read;
4202
4203         trace_access_lock(info->cpu);
4204         ret = ring_buffer_read_page(info->tr->buffer,
4205                                     &info->spare,
4206                                     count,
4207                                     info->cpu, 0);
4208         trace_access_unlock(info->cpu);
4209         if (ret < 0)
4210                 return 0;
4211
4212         info->read = 0;
4213
4214 read:
4215         size = PAGE_SIZE - info->read;
4216         if (size > count)
4217                 size = count;
4218
4219         ret = copy_to_user(ubuf, info->spare + info->read, size);
4220         if (ret == size)
4221                 return -EFAULT;
4222         size -= ret;
4223
4224         *ppos += size;
4225         info->read += size;
4226
4227         return size;
4228 }
4229
4230 static int tracing_buffers_release(struct inode *inode, struct file *file)
4231 {
4232         struct ftrace_buffer_info *info = file->private_data;
4233
4234         if (info->spare)
4235                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
4236         kfree(info);
4237
4238         return 0;
4239 }
4240
4241 struct buffer_ref {
4242         struct ring_buffer      *buffer;
4243         void                    *page;
4244         int                     ref;
4245 };
4246
4247 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4248                                     struct pipe_buffer *buf)
4249 {
4250         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4251
4252         if (--ref->ref)
4253                 return;
4254
4255         ring_buffer_free_read_page(ref->buffer, ref->page);
4256         kfree(ref);
4257         buf->private = 0;
4258 }
4259
4260 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4261                                 struct pipe_buffer *buf)
4262 {
4263         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4264
4265         ref->ref++;
4266 }
4267
4268 /* Pipe buffer operations for a buffer. */
4269 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4270         .can_merge              = 0,
4271         .map                    = generic_pipe_buf_map,
4272         .unmap                  = generic_pipe_buf_unmap,
4273         .confirm                = generic_pipe_buf_confirm,
4274         .release                = buffer_pipe_buf_release,
4275         .steal                  = generic_pipe_buf_steal,
4276         .get                    = buffer_pipe_buf_get,
4277 };
4278
4279 /*
4280  * Callback from splice_to_pipe(), used to release any pages left
4281  * at the end of the spd in case we errored out while filling the pipe.
4282  */
4283 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4284 {
4285         struct buffer_ref *ref =
4286                 (struct buffer_ref *)spd->partial[i].private;
4287
4288         if (--ref->ref)
4289                 return;
4290
4291         ring_buffer_free_read_page(ref->buffer, ref->page);
4292         kfree(ref);
4293         spd->partial[i].private = 0;
4294 }
4295
4296 static ssize_t
4297 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4298                             struct pipe_inode_info *pipe, size_t len,
4299                             unsigned int flags)
4300 {
4301         struct ftrace_buffer_info *info = file->private_data;
4302         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4303         struct page *pages_def[PIPE_DEF_BUFFERS];
4304         struct splice_pipe_desc spd = {
4305                 .pages          = pages_def,
4306                 .partial        = partial_def,
4307                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4308                 .flags          = flags,
4309                 .ops            = &buffer_pipe_buf_ops,
4310                 .spd_release    = buffer_spd_release,
4311         };
4312         struct buffer_ref *ref;
4313         int entries, size, i;
4314         size_t ret;
4315
4316         if (splice_grow_spd(pipe, &spd))
4317                 return -ENOMEM;
4318
4319         if (*ppos & (PAGE_SIZE - 1)) {
4320                 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4321                 ret = -EINVAL;
4322                 goto out;
4323         }
4324
4325         if (len & (PAGE_SIZE - 1)) {
4326                 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4327                 if (len < PAGE_SIZE) {
4328                         ret = -EINVAL;
4329                         goto out;
4330                 }
4331                 len &= PAGE_MASK;
4332         }
4333
4334         trace_access_lock(info->cpu);
4335         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4336
4337         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4338                 struct page *page;
4339                 int r;
4340
4341                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4342                 if (!ref)
4343                         break;
4344
4345                 ref->ref = 1;
4346                 ref->buffer = info->tr->buffer;
4347                 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4348                 if (!ref->page) {
4349                         kfree(ref);
4350                         break;
4351                 }
4352
4353                 r = ring_buffer_read_page(ref->buffer, &ref->page,
4354                                           len, info->cpu, 1);
4355                 if (r < 0) {
4356                         ring_buffer_free_read_page(ref->buffer, ref->page);
4357                         kfree(ref);
4358                         break;
4359                 }
4360
4361                 /*
4362                  * Zero out any leftover data; this is going to
4363                  * user land.
4364                  */
4365                 size = ring_buffer_page_len(ref->page);
4366                 if (size < PAGE_SIZE)
4367                         memset(ref->page + size, 0, PAGE_SIZE - size);
4368
4369                 page = virt_to_page(ref->page);
4370
4371                 spd.pages[i] = page;
4372                 spd.partial[i].len = PAGE_SIZE;
4373                 spd.partial[i].offset = 0;
4374                 spd.partial[i].private = (unsigned long)ref;
4375                 spd.nr_pages++;
4376                 *ppos += PAGE_SIZE;
4377
4378                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4379         }
4380
4381         trace_access_unlock(info->cpu);
4382         spd.nr_pages = i;
4383
4384         /* did we read anything? */
4385         if (!spd.nr_pages) {
4386                 if (flags & SPLICE_F_NONBLOCK)
4387                         ret = -EAGAIN;
4388                 else
4389                         ret = 0;
4390                 /* TODO: block */
4391                 goto out;
4392         }
4393
4394         ret = splice_to_pipe(pipe, &spd);
4395         splice_shrink_spd(&spd);
4396 out:
4397         return ret;
4398 }
4399
4400 static const struct file_operations tracing_buffers_fops = {
4401         .open           = tracing_buffers_open,
4402         .read           = tracing_buffers_read,
4403         .release        = tracing_buffers_release,
4404         .splice_read    = tracing_buffers_splice_read,
4405         .llseek         = no_llseek,
4406 };
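
/*
 * The per-cpu "trace_pipe_raw" files backed by these operations expose
 * raw ring buffer pages rather than formatted text; they are meant to
 * be consumed a page at a time (typically via splice()) by tools that
 * do their own decoding.
 */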
4407
4408 static ssize_t
4409 tracing_stats_read(struct file *filp, char __user *ubuf,
4410                    size_t count, loff_t *ppos)
4411 {
4412         unsigned long cpu = (unsigned long)filp->private_data;
4413         struct trace_array *tr = &global_trace;
4414         struct trace_seq *s;
4415         unsigned long cnt;
4416         unsigned long long t;
4417         unsigned long usec_rem;
4418
4419         s = kmalloc(sizeof(*s), GFP_KERNEL);
4420         if (!s)
4421                 return -ENOMEM;
4422
4423         trace_seq_init(s);
4424
4425         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4426         trace_seq_printf(s, "entries: %ld\n", cnt);
4427
4428         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4429         trace_seq_printf(s, "overrun: %ld\n", cnt);
4430
4431         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4432         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4433
4434         cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4435         trace_seq_printf(s, "bytes: %ld\n", cnt);
4436
4437         t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4438         usec_rem = do_div(t, USEC_PER_SEC);
4439         trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4440
4441         t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4442         usec_rem = do_div(t, USEC_PER_SEC);
4443         trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4444
4445         cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
4446         trace_seq_printf(s, "dropped events: %ld\n", cnt);
4447
4448         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4449
4450         kfree(s);
4451
4452         return count;
4453 }
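
/*
 * The per-cpu "stats" file produced above looks roughly like this
 * (values are illustrative):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 53240
 *   oldest event ts:  1832.114913
 *   now ts:  1832.208691
 *   dropped events: 0
 */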
4454
4455 static const struct file_operations tracing_stats_fops = {
4456         .open           = tracing_open_generic,
4457         .read           = tracing_stats_read,
4458         .llseek         = generic_file_llseek,
4459 };
4460
4461 #ifdef CONFIG_DYNAMIC_FTRACE
4462
4463 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4464 {
4465         return 0;
4466 }
4467
4468 static ssize_t
4469 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4470                   size_t cnt, loff_t *ppos)
4471 {
4472         static char ftrace_dyn_info_buffer[1024];
4473         static DEFINE_MUTEX(dyn_info_mutex);
4474         unsigned long *p = filp->private_data;
4475         char *buf = ftrace_dyn_info_buffer;
4476         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4477         int r;
4478
4479         mutex_lock(&dyn_info_mutex);
4480         r = sprintf(buf, "%ld ", *p);
4481
4482         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4483         buf[r++] = '\n';
4484
4485         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4486
4487         mutex_unlock(&dyn_info_mutex);
4488
4489         return r;
4490 }
4491
4492 static const struct file_operations tracing_dyn_info_fops = {
4493         .open           = tracing_open_generic,
4494         .read           = tracing_read_dyn_info,
4495         .llseek         = generic_file_llseek,
4496 };
4497 #endif
4498
4499 static struct dentry *d_tracer;
4500
4501 struct dentry *tracing_init_dentry(void)
4502 {
4503         static int once;
4504
4505         if (d_tracer)
4506                 return d_tracer;
4507
4508         if (!debugfs_initialized())
4509                 return NULL;
4510
4511         d_tracer = debugfs_create_dir("tracing", NULL);
4512
4513         if (!d_tracer && !once) {
4514                 once = 1;
4515                 pr_warning("Could not create debugfs directory 'tracing'\n");
4516                 return NULL;
4517         }
4518
4519         return d_tracer;
4520 }
4521
4522 static struct dentry *d_percpu;
4523
4524 struct dentry *tracing_dentry_percpu(void)
4525 {
4526         static int once;
4527         struct dentry *d_tracer;
4528
4529         if (d_percpu)
4530                 return d_percpu;
4531
4532         d_tracer = tracing_init_dentry();
4533
4534         if (!d_tracer)
4535                 return NULL;
4536
4537         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4538
4539         if (!d_percpu && !once) {
4540                 once = 1;
4541                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4542                 return NULL;
4543         }
4544
4545         return d_percpu;
4546 }
4547
4548 static void tracing_init_debugfs_percpu(long cpu)
4549 {
4550         struct dentry *d_percpu = tracing_dentry_percpu();
4551         struct dentry *d_cpu;
4552         char cpu_dir[30]; /* 30 characters should be more than enough */
4553
4554         if (!d_percpu)
4555                 return;
4556
4557         snprintf(cpu_dir, 30, "cpu%ld", cpu);
4558         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4559         if (!d_cpu) {
4560                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4561                 return;
4562         }
4563
4564         /* per cpu trace_pipe */
4565         trace_create_file("trace_pipe", 0444, d_cpu,
4566                         (void *) cpu, &tracing_pipe_fops);
4567
4568         /* per cpu trace */
4569         trace_create_file("trace", 0644, d_cpu,
4570                         (void *) cpu, &tracing_fops);
4571
4572         trace_create_file("trace_pipe_raw", 0444, d_cpu,
4573                         (void *) cpu, &tracing_buffers_fops);
4574
4575         trace_create_file("stats", 0444, d_cpu,
4576                         (void *) cpu, &tracing_stats_fops);
4577
4578         trace_create_file("buffer_size_kb", 0444, d_cpu,
4579                         (void *) cpu, &tracing_entries_fops);
4580 }
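
/*
 * The resulting debugfs layout is one directory per traced CPU, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */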
4581
4582 #ifdef CONFIG_FTRACE_SELFTEST
4583 /* Let selftest have access to static functions in this file */
4584 #include "trace_selftest.c"
4585 #endif
4586
4587 struct trace_option_dentry {
4588         struct tracer_opt               *opt;
4589         struct tracer_flags             *flags;
4590         struct dentry                   *entry;
4591 };
4592
4593 static ssize_t
4594 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4595                         loff_t *ppos)
4596 {
4597         struct trace_option_dentry *topt = filp->private_data;
4598         char *buf;
4599
4600         if (topt->flags->val & topt->opt->bit)
4601                 buf = "1\n";
4602         else
4603                 buf = "0\n";
4604
4605         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4606 }
4607
4608 static ssize_t
4609 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4610                          loff_t *ppos)
4611 {
4612         struct trace_option_dentry *topt = filp->private_data;
4613         unsigned long val;
4614         int ret;
4615
4616         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4617         if (ret)
4618                 return ret;
4619
4620         if (val != 0 && val != 1)
4621                 return -EINVAL;
4622
4623         if (!!(topt->flags->val & topt->opt->bit) != val) {
4624                 mutex_lock(&trace_types_lock);
4625                 ret = __set_tracer_option(current_trace, topt->flags,
4626                                           topt->opt, !val);
4627                 mutex_unlock(&trace_types_lock);
4628                 if (ret)
4629                         return ret;
4630         }
4631
4632         *ppos += cnt;
4633
4634         return cnt;
4635 }
4636
4637
4638 static const struct file_operations trace_options_fops = {
4639         .open = tracing_open_generic,
4640         .read = trace_options_read,
4641         .write = trace_options_write,
4642         .llseek = generic_file_llseek,
4643 };
4644
4645 static ssize_t
4646 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4647                         loff_t *ppos)
4648 {
4649         long index = (long)filp->private_data;
4650         char *buf;
4651
4652         if (trace_flags & (1 << index))
4653                 buf = "1\n";
4654         else
4655                 buf = "0\n";
4656
4657         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4658 }
4659
4660 static ssize_t
4661 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4662                          loff_t *ppos)
4663 {
4664         long index = (long)filp->private_data;
4665         unsigned long val;
4666         int ret;
4667
4668         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4669         if (ret)
4670                 return ret;
4671
4672         if (val != 0 && val != 1)
4673                 return -EINVAL;
4674         set_tracer_flags(1 << index, val);
4675
4676         *ppos += cnt;
4677
4678         return cnt;
4679 }
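
/*
 * These core option handlers back the files under the "options"
 * directory created below; each file toggles one bit in trace_flags,
 * e.g. (a sketch):
 *
 *   echo 0 > /sys/kernel/debug/tracing/options/print-parent
 */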
4680
4681 static const struct file_operations trace_options_core_fops = {
4682         .open = tracing_open_generic,
4683         .read = trace_options_core_read,
4684         .write = trace_options_core_write,
4685         .llseek = generic_file_llseek,
4686 };
4687
4688 struct dentry *trace_create_file(const char *name,
4689                                  umode_t mode,
4690                                  struct dentry *parent,
4691                                  void *data,
4692                                  const struct file_operations *fops)
4693 {
4694         struct dentry *ret;
4695
4696         ret = debugfs_create_file(name, mode, parent, data, fops);
4697         if (!ret)
4698                 pr_warning("Could not create debugfs '%s' entry\n", name);
4699
4700         return ret;
4701 }
4702
4703
4704 static struct dentry *trace_options_init_dentry(void)
4705 {
4706         struct dentry *d_tracer;
4707         static struct dentry *t_options;
4708
4709         if (t_options)
4710                 return t_options;
4711
4712         d_tracer = tracing_init_dentry();
4713         if (!d_tracer)
4714                 return NULL;
4715
4716         t_options = debugfs_create_dir("options", d_tracer);
4717         if (!t_options) {
4718                 pr_warning("Could not create debugfs directory 'options'\n");
4719                 return NULL;
4720         }
4721
4722         return t_options;
4723 }
4724
4725 static void
4726 create_trace_option_file(struct trace_option_dentry *topt,
4727                          struct tracer_flags *flags,
4728                          struct tracer_opt *opt)
4729 {
4730         struct dentry *t_options;
4731
4732         t_options = trace_options_init_dentry();
4733         if (!t_options)
4734                 return;
4735
4736         topt->flags = flags;
4737         topt->opt = opt;
4738
4739         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4740                                     &trace_options_fops);
4741
4742 }
4743
4744 static struct trace_option_dentry *
4745 create_trace_option_files(struct tracer *tracer)
4746 {
4747         struct trace_option_dentry *topts;
4748         struct tracer_flags *flags;
4749         struct tracer_opt *opts;
4750         int cnt;
4751
4752         if (!tracer)
4753                 return NULL;
4754
4755         flags = tracer->flags;
4756
4757         if (!flags || !flags->opts)
4758                 return NULL;
4759
4760         opts = flags->opts;
4761
4762         for (cnt = 0; opts[cnt].name; cnt++)
4763                 ;
4764
4765         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4766         if (!topts)
4767                 return NULL;
4768
4769         for (cnt = 0; opts[cnt].name; cnt++)
4770                 create_trace_option_file(&topts[cnt], flags,
4771                                          &opts[cnt]);
4772
4773         return topts;
4774 }
4775
4776 static void
4777 destroy_trace_option_files(struct trace_option_dentry *topts)
4778 {
4779         int cnt;
4780
4781         if (!topts)
4782                 return;
4783
4784         for (cnt = 0; topts[cnt].opt; cnt++) {
4785                 if (topts[cnt].entry)
4786                         debugfs_remove(topts[cnt].entry);
4787         }
4788
4789         kfree(topts);
4790 }
4791
4792 static struct dentry *
4793 create_trace_option_core_file(const char *option, long index)
4794 {
4795         struct dentry *t_options;
4796
4797         t_options = trace_options_init_dentry();
4798         if (!t_options)
4799                 return NULL;
4800
4801         return trace_create_file(option, 0644, t_options, (void *)index,
4802                                     &trace_options_core_fops);
4803 }
4804
4805 static __init void create_trace_options_dir(void)
4806 {
4807         struct dentry *t_options;
4808         int i;
4809
4810         t_options = trace_options_init_dentry();
4811         if (!t_options)
4812                 return;
4813
4814         for (i = 0; trace_options[i]; i++)
4815                 create_trace_option_core_file(trace_options[i], i);
4816 }
4817
4818 static ssize_t
4819 rb_simple_read(struct file *filp, char __user *ubuf,
4820                size_t cnt, loff_t *ppos)
4821 {
4822         struct trace_array *tr = filp->private_data;
4823         struct ring_buffer *buffer = tr->buffer;
4824         char buf[64];
4825         int r;
4826
4827         if (buffer)
4828                 r = ring_buffer_record_is_on(buffer);
4829         else
4830                 r = 0;
4831
4832         r = sprintf(buf, "%d\n", r);
4833
4834         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4835 }
4836
4837 static ssize_t
4838 rb_simple_write(struct file *filp, const char __user *ubuf,
4839                 size_t cnt, loff_t *ppos)
4840 {
4841         struct trace_array *tr = filp->private_data;
4842         struct ring_buffer *buffer = tr->buffer;
4843         unsigned long val;
4844         int ret;
4845
4846         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4847         if (ret)
4848                 return ret;
4849
4850         if (buffer) {
4851                 if (val)
4852                         ring_buffer_record_on(buffer);
4853                 else
4854                         ring_buffer_record_off(buffer);
4855         }
4856
4857         (*ppos)++;
4858
4859         return cnt;
4860 }
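
/*
 * rb_simple_read()/rb_simple_write() back the "tracing_on" file created
 * below: writing 0 or 1 turns ring buffer recording off or on without
 * changing the current tracer, e.g. (a sketch):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on
 */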
4861
4862 static const struct file_operations rb_simple_fops = {
4863         .open           = tracing_open_generic,
4864         .read           = rb_simple_read,
4865         .write          = rb_simple_write,
4866         .llseek         = default_llseek,
4867 };
4868
4869 static __init int tracer_init_debugfs(void)
4870 {
4871         struct dentry *d_tracer;
4872         int cpu;
4873
4874         trace_access_lock_init();
4875
4876         d_tracer = tracing_init_dentry();
4877
4878         trace_create_file("tracing_enabled", 0644, d_tracer,
4879                         &global_trace, &tracing_ctrl_fops);
4880
4881         trace_create_file("trace_options", 0644, d_tracer,
4882                         NULL, &tracing_iter_fops);
4883
4884         trace_create_file("tracing_cpumask", 0644, d_tracer,
4885                         NULL, &tracing_cpumask_fops);
4886
4887         trace_create_file("trace", 0644, d_tracer,
4888                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4889
4890         trace_create_file("available_tracers", 0444, d_tracer,
4891                         &global_trace, &show_traces_fops);
4892
4893         trace_create_file("current_tracer", 0644, d_tracer,
4894                         &global_trace, &set_tracer_fops);
4895
4896 #ifdef CONFIG_TRACER_MAX_TRACE
4897         trace_create_file("tracing_max_latency", 0644, d_tracer,
4898                         &tracing_max_latency, &tracing_max_lat_fops);
4899 #endif
4900
4901         trace_create_file("tracing_thresh", 0644, d_tracer,
4902                         &tracing_thresh, &tracing_max_lat_fops);
4903
4904         trace_create_file("README", 0444, d_tracer,
4905                         NULL, &tracing_readme_fops);
4906
4907         trace_create_file("trace_pipe", 0444, d_tracer,
4908                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4909
4910         trace_create_file("buffer_size_kb", 0644, d_tracer,
4911                         (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4912
4913         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4914                         &global_trace, &tracing_total_entries_fops);
4915
4916         trace_create_file("free_buffer", 0644, d_tracer,
4917                         &global_trace, &tracing_free_buffer_fops);
4918
4919         trace_create_file("trace_marker", 0220, d_tracer,
4920                         NULL, &tracing_mark_fops);
4921
4922         trace_create_file("saved_cmdlines", 0444, d_tracer,
4923                         NULL, &tracing_saved_cmdlines_fops);
4924
4925         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4926                           &trace_clock_fops);
4927
4928         trace_create_file("tracing_on", 0644, d_tracer,
4929                             &global_trace, &rb_simple_fops);
4930
4931 #ifdef CONFIG_DYNAMIC_FTRACE
4932         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4933                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4934 #endif
4935
4936         create_trace_options_dir();
4937
4938         for_each_tracing_cpu(cpu)
4939                 tracing_init_debugfs_percpu(cpu);
4940
4941         return 0;
4942 }
4943
4944 static int trace_panic_handler(struct notifier_block *this,
4945                                unsigned long event, void *unused)
4946 {
4947         if (ftrace_dump_on_oops)
4948                 ftrace_dump(ftrace_dump_on_oops);
4949         return NOTIFY_OK;
4950 }
4951
4952 static struct notifier_block trace_panic_notifier = {
4953         .notifier_call  = trace_panic_handler,
4954         .next           = NULL,
4955         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
4956 };
4957
4958 static int trace_die_handler(struct notifier_block *self,
4959                              unsigned long val,
4960                              void *data)
4961 {
4962         switch (val) {
4963         case DIE_OOPS:
4964                 if (ftrace_dump_on_oops)
4965                         ftrace_dump(ftrace_dump_on_oops);
4966                 break;
4967         default:
4968                 break;
4969         }
4970         return NOTIFY_OK;
4971 }
4972
4973 static struct notifier_block trace_die_notifier = {
4974         .notifier_call = trace_die_handler,
4975         .priority = 200
4976 };
4977
4978 /*
4979  * printk is limited to a max of 1024 characters; we really don't
4980  * need it that big. Nothing should be printing 1000 characters anyway.
4981  */
4982 #define TRACE_MAX_PRINT         1000
4983
4984 /*
4985  * Define here KERN_TRACE so that we have one place to modify
4986  * it if we decide to change what log level the ftrace dump
4987  * should be at.
4988  */
4989 #define KERN_TRACE              KERN_EMERG
4990
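/*
 * Print the contents of a trace_seq to the console at KERN_TRACE
 * level, truncated to at most TRACE_MAX_PRINT characters, and then
 * reset the seq so it can be reused for the next entry.
 */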
4991 void
4992 trace_printk_seq(struct trace_seq *s)
4993 {
4994         /* Probably should print a warning here. */
4995         if (s->len >= TRACE_MAX_PRINT)
4996                 s->len = TRACE_MAX_PRINT;
4997
4998         /* should be zero terminated, but we are paranoid. */
4999         s->buffer[s->len] = 0;
5000
5001         printk(KERN_TRACE "%s", s->buffer);
5002
5003         trace_seq_init(s);
5004 }
5005
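/*
 * Initialize an iterator over the global trace buffer: all CPUs,
 * using the currently selected tracer.
 */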
5006 void trace_init_global_iter(struct trace_iterator *iter)
5007 {
5008         iter->tr = &global_trace;
5009         iter->trace = current_trace;
5010         iter->cpu_file = TRACE_PIPE_ALL_CPU;
5011 }
5012
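/*
 * Dump the ring buffer to the console. This is meant for oops/panic
 * context: interrupts are disabled, a raw arch spinlock and a static
 * flag make sure only one dump ever runs, per-CPU recording is
 * disabled while the entries are printed, and tracing is optionally
 * killed for good so the buffer is not overwritten mid-dump.
 */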
5013 static void
5014 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5015 {
5016         static arch_spinlock_t ftrace_dump_lock =
5017                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
5018         /* use static because iter can be a bit big for the stack */
5019         static struct trace_iterator iter;
5020         unsigned int old_userobj;
5021         static int dump_ran;
5022         unsigned long flags;
5023         int cnt = 0, cpu;
5024
5025         /* only one dump */
5026         local_irq_save(flags);
5027         arch_spin_lock(&ftrace_dump_lock);
5028         if (dump_ran)
5029                 goto out;
5030
5031         dump_ran = 1;
5032
5033         tracing_off();
5034
5035         /* Did function tracer already get disabled? */
5036         if (ftrace_is_dead()) {
5037                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
5038                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
5039         }
5040
5041         if (disable_tracing)
5042                 ftrace_kill();
5043
5044         trace_init_global_iter(&iter);
5045
5046         for_each_tracing_cpu(cpu) {
5047                 atomic_inc(&iter.tr->data[cpu]->disabled);
5048         }
5049
5050         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
5051
5052         /* don't look at user memory in panic mode */
5053         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
5054
5055         /* Simulate the iterator */
5056         iter.tr = &global_trace;
5057         iter.trace = current_trace;
5058
5059         switch (oops_dump_mode) {
5060         case DUMP_ALL:
5061                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
5062                 break;
5063         case DUMP_ORIG:
5064                 iter.cpu_file = raw_smp_processor_id();
5065                 break;
5066         case DUMP_NONE:
5067                 goto out_enable;
5068         default:
5069                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
5070                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
5071         }
5072
5073         printk(KERN_TRACE "Dumping ftrace buffer:\n");
5074
5075         /*
5076          * We need to stop all tracing on all CPUS to read
5077          * the next buffer. This is a bit expensive, but is
5078          * not done often. We print all that we can read,
5079          * and then release the locks again.
5080          */
5081
5082         while (!trace_empty(&iter)) {
5083
5084                 if (!cnt)
5085                         printk(KERN_TRACE "---------------------------------\n");
5086
5087                 cnt++;
5088
5089                 /* reset all but tr, trace, and overruns */
5090                 memset(&iter.seq, 0,
5091                        sizeof(struct trace_iterator) -
5092                        offsetof(struct trace_iterator, seq));
5093                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
5094                 iter.pos = -1;
5095
5096                 if (trace_find_next_entry_inc(&iter) != NULL) {
5097                         int ret;
5098
5099                         ret = print_trace_line(&iter);
5100                         if (ret != TRACE_TYPE_NO_CONSUME)
5101                                 trace_consume(&iter);
5102                 }
5103                 touch_nmi_watchdog();
5104
5105                 trace_printk_seq(&iter.seq);
5106         }
5107
5108         if (!cnt)
5109                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
5110         else
5111                 printk(KERN_TRACE "---------------------------------\n");
5112
5113  out_enable:
5114         /* Re-enable tracing if requested */
5115         if (!disable_tracing) {
5116                 trace_flags |= old_userobj;
5117
5118                 for_each_tracing_cpu(cpu) {
5119                         atomic_dec(&iter.tr->data[cpu]->disabled);
5120                 }
5121                 tracing_on();
5122         }
5123
5124  out:
5125         arch_spin_unlock(&ftrace_dump_lock);
5126         local_irq_restore(flags);
5127 }
5128
5129 /* By default, tracing is disabled for good after the dump */
5130 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
5131 {
5132         __ftrace_dump(true, oops_dump_mode);
5133 }
5134 EXPORT_SYMBOL_GPL(ftrace_dump);
5135
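/*
 * Early-boot setup of the tracing core: allocate the CPU masks and
 * the global ring buffer (kept at its minimum size until expanded),
 * plus the max-latency buffer when CONFIG_TRACER_MAX_TRACE is set,
 * hook up the per-CPU data, initialize the saved-cmdline table,
 * register the nop tracer as the default, and register the panic
 * and die notifiers.
 */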
5136 __init static int tracer_alloc_buffers(void)
5137 {
5138         int ring_buf_size;
5139         enum ring_buffer_flags rb_flags;
5140         int i;
5141         int ret = -ENOMEM;
5142
5143
5144         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
5145                 goto out;
5146
5147         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
5148                 goto out_free_buffer_mask;
5149
5150         /* Only allocate trace_printk buffers if a trace_printk exists */
5151         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
5152                 /* Must be called before global_trace.buffer is allocated */
5153                 trace_printk_init_buffers();
5154
5155         /* To save memory, keep the ring buffer size to its minimum */
5156         if (ring_buffer_expanded)
5157                 ring_buf_size = trace_buf_size;
5158         else
5159                 ring_buf_size = 1;
5160
5161         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5162
5163         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
5164         cpumask_copy(tracing_cpumask, cpu_all_mask);
5165
5166         /* TODO: make the number of buffers hot pluggable with CPUS */
5167         global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5168         if (!global_trace.buffer) {
5169                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5170                 WARN_ON(1);
5171                 goto out_free_cpumask;
5172         }
5173         if (global_trace.buffer_disabled)
5174                 tracing_off();
5175
5176
5177 #ifdef CONFIG_TRACER_MAX_TRACE
5178         max_tr.buffer = ring_buffer_alloc(1, rb_flags);
5179         if (!max_tr.buffer) {
5180                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5181                 WARN_ON(1);
5182                 ring_buffer_free(global_trace.buffer);
5183                 goto out_free_cpumask;
5184         }
5185 #endif
5186
5187         /* Hook up the per-CPU trace data for all buffers */
5188         for_each_tracing_cpu(i) {
5189                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
5190                 max_tr.data[i] = &per_cpu(max_tr_data, i);
5191         }
5192
5193         set_buffer_entries(&global_trace,
5194                            ring_buffer_size(global_trace.buffer, 0));
5195 #ifdef CONFIG_TRACER_MAX_TRACE
5196         set_buffer_entries(&max_tr, 1);
5197 #endif
5198
5199         trace_init_cmdlines();
5200
5201         register_tracer(&nop_trace);
5202         current_trace = &nop_trace;
5203         /* All seems OK, enable tracing */
5204         tracing_disabled = 0;
5205
5206         atomic_notifier_chain_register(&panic_notifier_list,
5207                                        &trace_panic_notifier);
5208
5209         register_die_notifier(&trace_die_notifier);
5210
5211         return 0;
5212
5213 out_free_cpumask:
5214         free_cpumask_var(tracing_cpumask);
5215 out_free_buffer_mask:
5216         free_cpumask_var(tracing_buffer_mask);
5217 out:
5218         return ret;
5219 }
5220
5221 __init static int clear_boot_tracer(void)
5222 {
5223         /*
5224          * The buffer holding the default bootup tracer name is in
5225          * an init section. This function runs at late_initcall time:
5226          * if the boot tracer was never found and registered, clear
5227          * the pointer so that a later registration does not access
5228          * the buffer that is about to be freed.
5229          */
5230         if (!default_bootup_tracer)
5231                 return 0;
5232
5233         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
5234                default_bootup_tracer);
5235         default_bootup_tracer = NULL;
5236
5237         return 0;
5238 }
5239
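/*
 * Initcall ordering: allocate the ring buffers as early as possible,
 * create the debugfs interface once the filesystem infrastructure is
 * up, and clear a never-registered bootup tracer late in boot, before
 * its init-section buffer is freed.
 */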
5240 early_initcall(tracer_alloc_buffers);
5241 fs_initcall(tracer_init_debugfs);
5242 late_initcall(clear_boot_tracer);