2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
41 #include <linux/sched/rt.h>
44 #include "trace_output.h"
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
50 bool ring_buffer_expanded;
53 * We need to change this state when a selftest is running.
54 * A selftest will look into the ring-buffer to count the
55 * entries inserted during the selftest, although concurrent
56 * insertions into the ring-buffer, such as trace_printk, could occur
57 * at the same time, giving false positive or negative results.
59 static bool __read_mostly tracing_selftest_running;
62 * If a tracer is running, we do not want to run SELFTEST.
64 bool __read_mostly tracing_selftest_disabled;
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
71 static struct tracer_flags dummy_tracer_flags = {
73 .opts = dummy_tracer_opt
76 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
82 * To prevent the comm cache from being overwritten when no
83 * tracing is active, only save the comm when a trace event
84 * occurred.
86 static DEFINE_PER_CPU(bool, trace_cmdline_save);
89 * Kill all tracing for good (never come back).
90 * It is initialized to 1 but will turn to zero if the initialization
91 * of the tracer is successful. But that is the only place that sets
92 * this back to zero.
94 static int tracing_disabled = 1;
96 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
98 cpumask_var_t __read_mostly tracing_buffer_mask;
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputting them to a
107 * serial console.
109 * It is off by default, but you can enable it either by specifying
110 * "ftrace_dump_on_oops" in the kernel command line, or setting
111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
116 enum ftrace_dump_mode ftrace_dump_on_oops;
118 /* When set, tracing will stop when a WARN*() is hit */
119 int __disable_trace_on_warning;
121 static int tracing_set_tracer(const char *buf);
123 #define MAX_TRACER_SIZE 100
124 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
125 static char *default_bootup_tracer;
127 static bool allocate_snapshot;
129 static int __init set_cmdline_ftrace(char *str)
131 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
132 default_bootup_tracer = bootup_tracer_buf;
133 /* We are using ftrace early, expand it */
134 ring_buffer_expanded = true;
137 __setup("ftrace=", set_cmdline_ftrace);
139 static int __init set_ftrace_dump_on_oops(char *str)
141 if (*str++ != '=' || !*str) {
142 ftrace_dump_on_oops = DUMP_ALL;
146 if (!strcmp("orig_cpu", str)) {
147 ftrace_dump_on_oops = DUMP_ORIG;
153 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
155 static int __init stop_trace_on_warning(char *str)
157 __disable_trace_on_warning = 1;
160 __setup("traceoff_on_warning=", stop_trace_on_warning);
162 static int __init boot_alloc_snapshot(char *str)
164 allocate_snapshot = true;
165 /* We also need the main ring buffer expanded */
166 ring_buffer_expanded = true;
169 __setup("alloc_snapshot", boot_alloc_snapshot);
172 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
173 static char *trace_boot_options __initdata;
175 static int __init set_trace_boot_options(char *str)
177 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
178 trace_boot_options = trace_boot_options_buf;
181 __setup("trace_options=", set_trace_boot_options);
184 unsigned long long ns2usecs(cycle_t nsec)
192 * The global_trace is the descriptor that holds the tracing
193 * buffers for the live tracing. For each CPU, it contains
194 * a linked list of pages that will store trace entries. The
195 * page descriptor of the pages in memory is used to hold
196 * the linked list by linking the lru item in the page descriptor
197 * to each of the pages in the buffer per CPU.
199 * For each active CPU there is a data field that holds the
200 * pages for the buffer for that CPU. Each CPU has the same number
201 * of pages allocated for its buffer.
203 static struct trace_array global_trace;
205 LIST_HEAD(ftrace_trace_arrays);
207 int trace_array_get(struct trace_array *this_tr)
209 struct trace_array *tr;
212 mutex_lock(&trace_types_lock);
213 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
220 mutex_unlock(&trace_types_lock);
225 static void __trace_array_put(struct trace_array *this_tr)
227 WARN_ON(!this_tr->ref);
231 void trace_array_put(struct trace_array *this_tr)
233 mutex_lock(&trace_types_lock);
234 __trace_array_put(this_tr);
235 mutex_unlock(&trace_types_lock);
238 int filter_current_check_discard(struct ring_buffer *buffer,
239 struct ftrace_event_call *call, void *rec,
240 struct ring_buffer_event *event)
242 return filter_check_discard(call, rec, buffer, event);
244 EXPORT_SYMBOL_GPL(filter_current_check_discard);
246 cycle_t ftrace_now(int cpu)
250 /* Early boot up does not have a buffer yet */
251 if (!global_trace.trace_buffer.buffer)
252 return trace_clock_local();
254 ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
255 ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
261 * tracing_is_enabled - Show if global_trace has been enabled
263 * Shows if the global trace has been enabled or not. It uses the
264 * mirror flag "buffer_disabled" so it can be used in fast paths, such as
265 * by the irqsoff tracer. But it may be inaccurate due to races. If you
266 * need to know the accurate state, use tracing_is_on() which is a little
267 * slower, but accurate.
269 int tracing_is_enabled(void)
272 * For quick access (irqsoff uses this in fast path), just
273 * return the mirror variable of the state of the ring buffer.
274 * It's a little racy, but we don't really care.
277 return !global_trace.buffer_disabled;
281 * trace_buf_size is the size in bytes that is allocated
282 * for a buffer. Note, the number of bytes is always rounded
283 * to page size.
285 * This number is purposely set to a low number of 16384.
286 * If the dump on oops happens, it will be much appreciated
287 * to not have to wait for all that output. Anyway, this is
288 * configurable at both boot time and run time.
290 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
292 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
294 /* trace_types holds a linked list of available tracers. */
295 static struct tracer *trace_types __read_mostly;
298 * trace_types_lock is used to protect the trace_types list.
300 DEFINE_MUTEX(trace_types_lock);
303 * serialize the access of the ring buffer
305 * The ring buffer serializes readers, but it is only low-level protection.
306 * The validity of the events (returned by ring_buffer_peek(), etc.)
307 * is not protected by the ring buffer.
309 * The content of events may become garbage if we allow another process to
310 * consume these events concurrently:
311 * A) the page of the consumed events may become a normal page
312 * (not a reader page) in the ring buffer, and this page will be rewritten
313 * by the event producer.
314 * B) The page of the consumed events may become a page for splice_read,
315 * and this page will be returned to the system.
317 * These primitives allow multi-process access to different cpu ring buffers
318 * concurrently.
320 * These primitives don't distinguish read-only and read-consume access.
321 * Multiple read-only accesses are also serialized.
325 static DECLARE_RWSEM(all_cpu_access_lock);
326 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
328 static inline void trace_access_lock(int cpu)
330 if (cpu == RING_BUFFER_ALL_CPUS) {
331 /* gain it for accessing the whole ring buffer. */
332 down_write(&all_cpu_access_lock);
334 /* gain it for accessing a cpu ring buffer. */
336 /* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
337 down_read(&all_cpu_access_lock);
339 /* Second, block other access to this @cpu ring buffer. */
340 mutex_lock(&per_cpu(cpu_access_lock, cpu));
344 static inline void trace_access_unlock(int cpu)
346 if (cpu == RING_BUFFER_ALL_CPUS) {
347 up_write(&all_cpu_access_lock);
349 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
350 up_read(&all_cpu_access_lock);
354 static inline void trace_access_lock_init(void)
358 for_each_possible_cpu(cpu)
359 mutex_init(&per_cpu(cpu_access_lock, cpu));
364 static DEFINE_MUTEX(access_lock);
366 static inline void trace_access_lock(int cpu)
369 mutex_lock(&access_lock);
372 static inline void trace_access_unlock(int cpu)
375 mutex_unlock(&access_lock);
378 static inline void trace_access_lock_init(void)
384 /* trace_flags holds trace_options default values */
385 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
386 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
387 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
388 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
390 static void tracer_tracing_on(struct trace_array *tr)
392 if (tr->trace_buffer.buffer)
393 ring_buffer_record_on(tr->trace_buffer.buffer);
395 * This flag is looked at when buffers haven't been allocated
396 * yet, or by some tracers (like irqsoff), that just want to
397 * know if the ring buffer has been disabled, but it can handle
398 * races where it gets disabled while we still do a record.
399 * As the check is in the fast path of the tracers, it is more
400 * important to be fast than accurate.
402 tr->buffer_disabled = 0;
403 /* Make the flag seen by readers */
408 * tracing_on - enable tracing buffers
410 * This function enables tracing buffers that may have been
411 * disabled with tracing_off.
413 void tracing_on(void)
415 tracer_tracing_on(&global_trace);
417 EXPORT_SYMBOL_GPL(tracing_on);
420 * __trace_puts - write a constant string into the trace buffer.
421 * @ip: The address of the caller
422 * @str: The constant string to write
423 * @size: The size of the string.
425 int __trace_puts(unsigned long ip, const char *str, int size)
427 struct ring_buffer_event *event;
428 struct ring_buffer *buffer;
429 struct print_entry *entry;
430 unsigned long irq_flags;
433 alloc = sizeof(*entry) + size + 2; /* possible \n added */
435 local_save_flags(irq_flags);
436 buffer = global_trace.trace_buffer.buffer;
437 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
438 irq_flags, preempt_count());
442 entry = ring_buffer_event_data(event);
445 memcpy(&entry->buf, str, size);
447 /* Add a newline if necessary */
448 if (entry->buf[size - 1] != '\n') {
449 entry->buf[size] = '\n';
450 entry->buf[size + 1] = '\0';
452 entry->buf[size] = '\0';
454 __buffer_unlock_commit(buffer, event);
458 EXPORT_SYMBOL_GPL(__trace_puts);
461 * __trace_bputs - write the pointer to a constant string into trace buffer
462 * @ip: The address of the caller
463 * @str: The constant string to write to the buffer
465 int __trace_bputs(unsigned long ip, const char *str)
467 struct ring_buffer_event *event;
468 struct ring_buffer *buffer;
469 struct bputs_entry *entry;
470 unsigned long irq_flags;
471 int size = sizeof(struct bputs_entry);
473 local_save_flags(irq_flags);
474 buffer = global_trace.trace_buffer.buffer;
475 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
476 irq_flags, preempt_count());
480 entry = ring_buffer_event_data(event);
484 __buffer_unlock_commit(buffer, event);
488 EXPORT_SYMBOL_GPL(__trace_bputs);
490 #ifdef CONFIG_TRACER_SNAPSHOT
492 * tracing_snapshot - take a snapshot of the current buffer.
494 * This causes a swap between the snapshot buffer and the current live
495 * tracing buffer. You can use this to take snapshots of the live
496 * trace when some condition is triggered, but continue to trace.
498 * Note, make sure to allocate the snapshot with either
499 * tracing_snapshot_alloc(), or by doing it manually
500 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
502 * If the snapshot buffer is not allocated, it will stop tracing.
503 * Basically making a permanent snapshot.
505 void tracing_snapshot(void)
507 struct trace_array *tr = &global_trace;
508 struct tracer *tracer = tr->current_trace;
512 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
513 internal_trace_puts("*** snapshot is being ignored ***\n");
517 if (!tr->allocated_snapshot) {
518 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
519 internal_trace_puts("*** stopping trace here! ***\n");
524 /* Note, snapshot can not be used when the tracer uses it */
525 if (tracer->use_max_tr) {
526 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
527 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
531 local_irq_save(flags);
532 update_max_tr(tr, current, smp_processor_id());
533 local_irq_restore(flags);
535 EXPORT_SYMBOL_GPL(tracing_snapshot);
537 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
538 struct trace_buffer *size_buf, int cpu_id);
539 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
541 static int alloc_snapshot(struct trace_array *tr)
545 if (!tr->allocated_snapshot) {
547 /* allocate spare buffer */
548 ret = resize_buffer_duplicate_size(&tr->max_buffer,
549 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
553 tr->allocated_snapshot = true;
559 void free_snapshot(struct trace_array *tr)
562 * We don't free the ring buffer; instead, we resize it because
563 * the max_tr ring buffer has some state (e.g. ring->clock) and
564 * we want to preserve it.
566 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
567 set_buffer_entries(&tr->max_buffer, 1);
568 tracing_reset_online_cpus(&tr->max_buffer);
569 tr->allocated_snapshot = false;
573 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
575 * This is similar to tracing_snapshot(), but it will allocate the
576 * snapshot buffer if it isn't already allocated. Use this only
577 * where it is safe to sleep, as the allocation may sleep.
579 * This causes a swap between the snapshot buffer and the current live
580 * tracing buffer. You can use this to take snapshots of the live
581 * trace when some condition is triggered, but continue to trace.
583 void tracing_snapshot_alloc(void)
585 struct trace_array *tr = &global_trace;
588 ret = alloc_snapshot(tr);
589 if (WARN_ON(ret < 0))
594 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
596 void tracing_snapshot(void)
598 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
600 EXPORT_SYMBOL_GPL(tracing_snapshot);
601 void tracing_snapshot_alloc(void)
606 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
607 #endif /* CONFIG_TRACER_SNAPSHOT */
609 static void tracer_tracing_off(struct trace_array *tr)
611 if (tr->trace_buffer.buffer)
612 ring_buffer_record_off(tr->trace_buffer.buffer);
614 * This flag is looked at when buffers haven't been allocated
615 * yet, or by some tracers (like irqsoff), that just want to
616 * know if the ring buffer has been disabled, but it can handle
617 * races where it gets disabled while we still do a record.
618 * As the check is in the fast path of the tracers, it is more
619 * important to be fast than accurate.
621 tr->buffer_disabled = 1;
622 /* Make the flag seen by readers */
627 * tracing_off - turn off tracing buffers
629 * This function stops the tracing buffers from recording data.
630 * It does not disable any overhead the tracers themselves may
631 * be causing. This function simply causes all recording to
632 * the ring buffers to fail.
634 void tracing_off(void)
636 tracer_tracing_off(&global_trace);
638 EXPORT_SYMBOL_GPL(tracing_off);
640 void disable_trace_on_warning(void)
642 if (__disable_trace_on_warning)
647 * tracer_tracing_is_on - show the real state of the ring buffer
648 * @tr : the trace array whose ring buffer state is queried
650 * Shows the real state of the ring buffer: whether it is enabled or not.
652 static int tracer_tracing_is_on(struct trace_array *tr)
654 if (tr->trace_buffer.buffer)
655 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
656 return !tr->buffer_disabled;
660 * tracing_is_on - show state of ring buffers enabled
662 int tracing_is_on(void)
664 return tracer_tracing_is_on(&global_trace);
666 EXPORT_SYMBOL_GPL(tracing_is_on);
668 static int __init set_buf_size(char *str)
670 unsigned long buf_size;
674 buf_size = memparse(str, &str);
675 /* nr_entries can not be zero */
678 trace_buf_size = buf_size;
681 __setup("trace_buf_size=", set_buf_size);
683 static int __init set_tracing_thresh(char *str)
685 unsigned long threshold;
690 ret = kstrtoul(str, 0, &threshold);
693 tracing_thresh = threshold * 1000;
696 __setup("tracing_thresh=", set_tracing_thresh);
698 unsigned long nsecs_to_usecs(unsigned long nsecs)
703 /* These must match the bit positions in trace_iterator_flags */
704 static const char *trace_options[] = {
737 int in_ns; /* is this clock in nanoseconds? */
739 { trace_clock_local, "local", 1 },
740 { trace_clock_global, "global", 1 },
741 { trace_clock_counter, "counter", 0 },
742 { trace_clock_jiffies, "uptime", 1 },
743 { trace_clock, "perf", 1 },
750 * trace_parser_get_init - gets the buffer for trace parser
752 int trace_parser_get_init(struct trace_parser *parser, int size)
754 memset(parser, 0, sizeof(*parser));
756 parser->buffer = kmalloc(size, GFP_KERNEL);
765 * trace_parser_put - frees the buffer for trace parser
767 void trace_parser_put(struct trace_parser *parser)
769 kfree(parser->buffer);
773 * trace_get_user - reads the user input string separated by space
774 * (matched by isspace(ch))
776 * For each string found the 'struct trace_parser' is updated,
777 * and the function returns.
779 * Returns number of bytes read.
781 * See kernel/trace/trace.h for 'struct trace_parser' details.
783 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
784 size_t cnt, loff_t *ppos)
791 trace_parser_clear(parser);
793 ret = get_user(ch, ubuf++);
801 * The parser is not finished with the last write,
802 * continue reading the user input without skipping spaces.
805 /* skip white space */
806 while (cnt && isspace(ch)) {
807 ret = get_user(ch, ubuf++);
814 /* only spaces were written */
824 /* read the non-space input */
825 while (cnt && !isspace(ch)) {
826 if (parser->idx < parser->size - 1)
827 parser->buffer[parser->idx++] = ch;
832 ret = get_user(ch, ubuf++);
839 /* We either got finished input or we have to wait for another call. */
841 parser->buffer[parser->idx] = 0;
842 parser->cont = false;
845 parser->buffer[parser->idx++] = ch;
855 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
863 if (s->len <= s->readpos)
866 len = s->len - s->readpos;
869 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
879 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
883 if (s->len <= s->readpos)
886 len = s->len - s->readpos;
889 memcpy(buf, s->buffer + s->readpos, cnt);
896 * ftrace_max_lock is used to protect the swapping of buffers
897 * when taking a max snapshot. The buffers themselves are
898 * protected by per_cpu spinlocks. But the action of the swap
899 * needs its own lock.
901 * This is defined as an arch_spinlock_t in order to help
902 * with performance when lockdep debugging is enabled.
904 * It is also used in other places outside of update_max_tr(),
905 * so it needs to be defined outside of the
906 * CONFIG_TRACER_MAX_TRACE #ifdef.
908 static arch_spinlock_t ftrace_max_lock =
909 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
911 unsigned long __read_mostly tracing_thresh;
913 #ifdef CONFIG_TRACER_MAX_TRACE
914 unsigned long __read_mostly tracing_max_latency;
917 * Copy the new maximum trace into the separate maximum-trace
918 * structure. (this way the maximum trace is permanently saved,
919 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
922 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
924 struct trace_buffer *trace_buf = &tr->trace_buffer;
925 struct trace_buffer *max_buf = &tr->max_buffer;
926 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
927 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
930 max_buf->time_start = data->preempt_timestamp;
932 max_data->saved_latency = tracing_max_latency;
933 max_data->critical_start = data->critical_start;
934 max_data->critical_end = data->critical_end;
936 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
937 max_data->pid = tsk->pid;
939 * If tsk == current, then use current_uid(), as that does not use
940 * RCU. The irq tracer can be called out of RCU scope.
943 max_data->uid = current_uid();
945 max_data->uid = task_uid(tsk);
947 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
948 max_data->policy = tsk->policy;
949 max_data->rt_priority = tsk->rt_priority;
951 /* record this task's comm */
952 tracing_record_cmdline(tsk);
956 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
958 * @tsk: the task with the latency
959 * @cpu: The cpu that initiated the trace.
961 * Flip the buffers between the @tr and the max_tr and record information
962 * about which task was the cause of this latency.
965 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
967 struct ring_buffer *buf;
972 WARN_ON_ONCE(!irqs_disabled());
974 if (!tr->allocated_snapshot) {
975 /* Only the nop tracer should hit this when disabling */
976 WARN_ON_ONCE(tr->current_trace != &nop_trace);
980 arch_spin_lock(&ftrace_max_lock);
982 buf = tr->trace_buffer.buffer;
983 tr->trace_buffer.buffer = tr->max_buffer.buffer;
984 tr->max_buffer.buffer = buf;
986 __update_max_tr(tr, tsk, cpu);
987 arch_spin_unlock(&ftrace_max_lock);
991 * update_max_tr_single - only copy one trace over, and reset the rest
993 * @tsk: task with the latency
994 * @cpu: the cpu of the buffer to copy.
996 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
999 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1006 WARN_ON_ONCE(!irqs_disabled());
1007 if (!tr->allocated_snapshot) {
1008 /* Only the nop tracer should hit this when disabling */
1009 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1013 arch_spin_lock(&ftrace_max_lock);
1015 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1017 if (ret == -EBUSY) {
1019 * We failed to swap the buffer due to a commit taking
1020 * place on this CPU. We fail to record, but we reset
1021 * the max trace buffer (no one writes directly to it)
1022 * and flag that it failed.
1024 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1025 "Failed to swap buffers due to commit in progress\n");
1028 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1030 __update_max_tr(tr, tsk, cpu);
1031 arch_spin_unlock(&ftrace_max_lock);
1033 #endif /* CONFIG_TRACER_MAX_TRACE */
1035 static void default_wait_pipe(struct trace_iterator *iter)
1037 /* Iterators are static, they should be filled or empty */
1038 if (trace_buffer_iter(iter, iter->cpu_file))
1041 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1044 #ifdef CONFIG_FTRACE_STARTUP_TEST
1045 static int run_tracer_selftest(struct tracer *type)
1047 struct trace_array *tr = &global_trace;
1048 struct tracer *saved_tracer = tr->current_trace;
1051 if (!type->selftest || tracing_selftest_disabled)
1055 * Run a selftest on this tracer.
1056 * Here we reset the trace buffer, and set the current
1057 * tracer to be this tracer. The tracer can then run some
1058 * internal tracing to verify that everything is in order.
1059 * If we fail, we do not register this tracer.
1061 tracing_reset_online_cpus(&tr->trace_buffer);
1063 tr->current_trace = type;
1065 #ifdef CONFIG_TRACER_MAX_TRACE
1066 if (type->use_max_tr) {
1067 /* If we expanded the buffers, make sure the max is expanded too */
1068 if (ring_buffer_expanded)
1069 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1070 RING_BUFFER_ALL_CPUS);
1071 tr->allocated_snapshot = true;
1075 /* the test is responsible for initializing and enabling */
1076 pr_info("Testing tracer %s: ", type->name);
1077 ret = type->selftest(type, tr);
1078 /* the test is responsible for resetting too */
1079 tr->current_trace = saved_tracer;
1081 printk(KERN_CONT "FAILED!\n");
1082 /* Add the warning after printing 'FAILED' */
1086 /* Only reset on passing, to avoid touching corrupted buffers */
1087 tracing_reset_online_cpus(&tr->trace_buffer);
1089 #ifdef CONFIG_TRACER_MAX_TRACE
1090 if (type->use_max_tr) {
1091 tr->allocated_snapshot = false;
1093 /* Shrink the max buffer again */
1094 if (ring_buffer_expanded)
1095 ring_buffer_resize(tr->max_buffer.buffer, 1,
1096 RING_BUFFER_ALL_CPUS);
1100 printk(KERN_CONT "PASSED\n");
1104 static inline int run_tracer_selftest(struct tracer *type)
1108 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1111 * register_tracer - register a tracer with the ftrace system.
1112 * @type: the plugin for the tracer
1114 * Register a new plugin tracer.
1116 int register_tracer(struct tracer *type)
1122 pr_info("Tracer must have a name\n");
1126 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1127 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1131 mutex_lock(&trace_types_lock);
1133 tracing_selftest_running = true;
1135 for (t = trace_types; t; t = t->next) {
1136 if (strcmp(type->name, t->name) == 0) {
1138 pr_info("Tracer %s already registered\n",
1145 if (!type->set_flag)
1146 type->set_flag = &dummy_set_flag;
1148 type->flags = &dummy_tracer_flags;
1150 if (!type->flags->opts)
1151 type->flags->opts = dummy_tracer_opt;
1152 if (!type->wait_pipe)
1153 type->wait_pipe = default_wait_pipe;
1155 ret = run_tracer_selftest(type);
1159 type->next = trace_types;
1163 tracing_selftest_running = false;
1164 mutex_unlock(&trace_types_lock);
1166 if (ret || !default_bootup_tracer)
1169 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1172 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1173 /* Do we want this tracer to start on bootup? */
1174 tracing_set_tracer(type->name);
1175 default_bootup_tracer = NULL;
1176 /* disable other selftests, since this tracer will break them. */
1177 tracing_selftest_disabled = true;
1178 #ifdef CONFIG_FTRACE_STARTUP_TEST
1179 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1187 void tracing_reset(struct trace_buffer *buf, int cpu)
1189 struct ring_buffer *buffer = buf->buffer;
1194 ring_buffer_record_disable(buffer);
1196 /* Make sure all commits have finished */
1197 synchronize_sched();
1198 ring_buffer_reset_cpu(buffer, cpu);
1200 ring_buffer_record_enable(buffer);
1203 void tracing_reset_online_cpus(struct trace_buffer *buf)
1205 struct ring_buffer *buffer = buf->buffer;
1211 ring_buffer_record_disable(buffer);
1213 /* Make sure all commits have finished */
1214 synchronize_sched();
1216 buf->time_start = ftrace_now(buf->cpu);
1218 for_each_online_cpu(cpu)
1219 ring_buffer_reset_cpu(buffer, cpu);
1221 ring_buffer_record_enable(buffer);
1224 void tracing_reset_current(int cpu)
1226 tracing_reset(&global_trace.trace_buffer, cpu);
1229 void tracing_reset_all_online_cpus(void)
1231 struct trace_array *tr;
1233 mutex_lock(&trace_types_lock);
1234 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1235 tracing_reset_online_cpus(&tr->trace_buffer);
1236 #ifdef CONFIG_TRACER_MAX_TRACE
1237 tracing_reset_online_cpus(&tr->max_buffer);
1240 mutex_unlock(&trace_types_lock);
1243 #define SAVED_CMDLINES 128
1244 #define NO_CMDLINE_MAP UINT_MAX
1245 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1246 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1247 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1248 static int cmdline_idx;
1249 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1251 /* temporarily disable recording */
1252 static atomic_t trace_record_cmdline_disabled __read_mostly;
1254 static void trace_init_cmdlines(void)
1256 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1257 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
1261 int is_tracing_stopped(void)
1263 return global_trace.stop_count;
1267 * ftrace_off_permanent - disable all ftrace code permanently
1269 * This should only be called when a serious anomaly has
1270 * been detected. This will turn off function tracing,
1271 * ring buffers, and other tracing utilities. It takes no
1272 * locks and can be called from any context.
1274 void ftrace_off_permanent(void)
1276 tracing_disabled = 1;
1278 tracing_off_permanent();
1282 * tracing_start - quick start of the tracer
1284 * If tracing is enabled but was stopped by tracing_stop,
1285 * this will start the tracer back up.
1287 void tracing_start(void)
1289 struct ring_buffer *buffer;
1290 unsigned long flags;
1292 if (tracing_disabled)
1295 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1296 if (--global_trace.stop_count) {
1297 if (global_trace.stop_count < 0) {
1298 /* Someone screwed up their debugging */
1300 global_trace.stop_count = 0;
1305 /* Prevent the buffers from switching */
1306 arch_spin_lock(&ftrace_max_lock);
1308 buffer = global_trace.trace_buffer.buffer;
1310 ring_buffer_record_enable(buffer);
1312 #ifdef CONFIG_TRACER_MAX_TRACE
1313 buffer = global_trace.max_buffer.buffer;
1315 ring_buffer_record_enable(buffer);
1318 arch_spin_unlock(&ftrace_max_lock);
1322 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1325 static void tracing_start_tr(struct trace_array *tr)
1327 struct ring_buffer *buffer;
1328 unsigned long flags;
1330 if (tracing_disabled)
1333 /* If global, we need to also start the max tracer */
1334 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1335 return tracing_start();
1337 raw_spin_lock_irqsave(&tr->start_lock, flags);
1339 if (--tr->stop_count) {
1340 if (tr->stop_count < 0) {
1341 /* Someone screwed up their debugging */
1348 buffer = tr->trace_buffer.buffer;
1350 ring_buffer_record_enable(buffer);
1353 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1357 * tracing_stop - quick stop of the tracer
1359 * Lightweight way to stop tracing. Use in conjunction with
1360 * tracing_start().
1362 void tracing_stop(void)
1364 struct ring_buffer *buffer;
1365 unsigned long flags;
1368 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1369 if (global_trace.stop_count++)
1372 /* Prevent the buffers from switching */
1373 arch_spin_lock(&ftrace_max_lock);
1375 buffer = global_trace.trace_buffer.buffer;
1377 ring_buffer_record_disable(buffer);
1379 #ifdef CONFIG_TRACER_MAX_TRACE
1380 buffer = global_trace.max_buffer.buffer;
1382 ring_buffer_record_disable(buffer);
1385 arch_spin_unlock(&ftrace_max_lock);
1388 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1391 static void tracing_stop_tr(struct trace_array *tr)
1393 struct ring_buffer *buffer;
1394 unsigned long flags;
1396 /* If global, we need to also stop the max tracer */
1397 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1398 return tracing_stop();
1400 raw_spin_lock_irqsave(&tr->start_lock, flags);
1401 if (tr->stop_count++)
1404 buffer = tr->trace_buffer.buffer;
1406 ring_buffer_record_disable(buffer);
1409 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1412 void trace_stop_cmdline_recording(void);
1414 static void trace_save_cmdline(struct task_struct *tsk)
1418 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1422 * It's not the end of the world if we don't get
1423 * the lock, but we also don't want to spin
1424 * nor do we want to disable interrupts,
1425 * so if we miss here, then better luck next time.
1427 if (!arch_spin_trylock(&trace_cmdline_lock))
1430 idx = map_pid_to_cmdline[tsk->pid];
1431 if (idx == NO_CMDLINE_MAP) {
1432 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1435 * Check whether the cmdline buffer at idx has a pid
1436 * mapped. We are going to overwrite that entry so we
1437 * need to clear the map_pid_to_cmdline. Otherwise we
1438 * would read the new comm for the old pid.
1440 pid = map_cmdline_to_pid[idx];
1441 if (pid != NO_CMDLINE_MAP)
1442 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1444 map_cmdline_to_pid[idx] = tsk->pid;
1445 map_pid_to_cmdline[tsk->pid] = idx;
1450 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1452 arch_spin_unlock(&trace_cmdline_lock);
1455 void trace_find_cmdline(int pid, char comm[])
1460 strcpy(comm, "<idle>");
1464 if (WARN_ON_ONCE(pid < 0)) {
1465 strcpy(comm, "<XXX>");
1469 if (pid > PID_MAX_DEFAULT) {
1470 strcpy(comm, "<...>");
1475 arch_spin_lock(&trace_cmdline_lock);
1476 map = map_pid_to_cmdline[pid];
1477 if (map != NO_CMDLINE_MAP)
1478 strcpy(comm, saved_cmdlines[map]);
1480 strcpy(comm, "<...>");
1482 arch_spin_unlock(&trace_cmdline_lock);
1486 void tracing_record_cmdline(struct task_struct *tsk)
1488 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1491 if (!__this_cpu_read(trace_cmdline_save))
1494 __this_cpu_write(trace_cmdline_save, false);
1496 trace_save_cmdline(tsk);
1500 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1503 struct task_struct *tsk = current;
1505 entry->preempt_count = pc & 0xff;
1506 entry->pid = (tsk) ? tsk->pid : 0;
1508 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1509 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1511 TRACE_FLAG_IRQS_NOSUPPORT |
1513 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1514 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1515 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1517 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1519 struct ring_buffer_event *
1520 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1523 unsigned long flags, int pc)
1525 struct ring_buffer_event *event;
1527 event = ring_buffer_lock_reserve(buffer, len);
1528 if (event != NULL) {
1529 struct trace_entry *ent = ring_buffer_event_data(event);
1531 tracing_generic_entry_update(ent, flags, pc);
1539 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1541 __this_cpu_write(trace_cmdline_save, true);
1542 ring_buffer_unlock_commit(buffer, event);
1546 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1547 struct ring_buffer_event *event,
1548 unsigned long flags, int pc)
1550 __buffer_unlock_commit(buffer, event);
1552 ftrace_trace_stack(buffer, flags, 6, pc);
1553 ftrace_trace_userstack(buffer, flags, pc);
1556 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1557 struct ring_buffer_event *event,
1558 unsigned long flags, int pc)
1560 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1562 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1564 struct ring_buffer_event *
1565 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1566 struct ftrace_event_file *ftrace_file,
1567 int type, unsigned long len,
1568 unsigned long flags, int pc)
1570 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1571 return trace_buffer_lock_reserve(*current_rb,
1572 type, len, flags, pc);
1574 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1576 struct ring_buffer_event *
1577 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1578 int type, unsigned long len,
1579 unsigned long flags, int pc)
1581 *current_rb = global_trace.trace_buffer.buffer;
1582 return trace_buffer_lock_reserve(*current_rb,
1583 type, len, flags, pc);
1585 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1587 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1588 struct ring_buffer_event *event,
1589 unsigned long flags, int pc)
1591 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1593 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1595 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
1597 unsigned long flags, int pc,
1598 struct pt_regs *regs)
1600 __buffer_unlock_commit(buffer, event);
1602 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1603 ftrace_trace_userstack(buffer, flags, pc);
1605 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1607 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1608 struct ring_buffer_event *event)
1610 ring_buffer_discard_commit(buffer, event);
1612 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1615 trace_function(struct trace_array *tr,
1616 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1619 struct ftrace_event_call *call = &event_function;
1620 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1621 struct ring_buffer_event *event;
1622 struct ftrace_entry *entry;
1624 /* If we are reading the ring buffer, don't trace */
1625 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1628 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1632 entry = ring_buffer_event_data(event);
1634 entry->parent_ip = parent_ip;
1636 if (!filter_check_discard(call, entry, buffer, event))
1637 __buffer_unlock_commit(buffer, event);
1640 #ifdef CONFIG_STACKTRACE
1642 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1643 struct ftrace_stack {
1644 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1647 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1648 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1650 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1651 unsigned long flags,
1652 int skip, int pc, struct pt_regs *regs)
1654 struct ftrace_event_call *call = &event_kernel_stack;
1655 struct ring_buffer_event *event;
1656 struct stack_entry *entry;
1657 struct stack_trace trace;
1659 int size = FTRACE_STACK_ENTRIES;
1661 trace.nr_entries = 0;
1665 * Since events can happen in NMIs there's no safe way to
1666 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1667 * or NMI comes in, it will just have to use the default
1668 * FTRACE_STACK_SIZE.
1670 preempt_disable_notrace();
1672 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1674 * We don't need any atomic variables, just a barrier.
1675 * If an interrupt comes in, we don't care, because it would
1676 * have exited and put the counter back to what we want.
1677 * We just need a barrier to keep gcc from moving things
1678 * around.
1681 if (use_stack == 1) {
1682 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1683 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1686 save_stack_trace_regs(regs, &trace);
1688 save_stack_trace(&trace);
1690 if (trace.nr_entries > size)
1691 size = trace.nr_entries;
1693 /* From now on, use_stack is a boolean */
1696 size *= sizeof(unsigned long);
1698 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1699 sizeof(*entry) + size, flags, pc);
1702 entry = ring_buffer_event_data(event);
1704 memset(&entry->caller, 0, size);
1707 memcpy(&entry->caller, trace.entries,
1708 trace.nr_entries * sizeof(unsigned long));
1710 trace.max_entries = FTRACE_STACK_ENTRIES;
1711 trace.entries = entry->caller;
1713 save_stack_trace_regs(regs, &trace);
1715 save_stack_trace(&trace);
1718 entry->size = trace.nr_entries;
1720 if (!filter_check_discard(call, entry, buffer, event))
1721 __buffer_unlock_commit(buffer, event);
1724 /* Again, don't let gcc optimize things here */
1726 __this_cpu_dec(ftrace_stack_reserve);
1727 preempt_enable_notrace();
1731 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1732 int skip, int pc, struct pt_regs *regs)
1734 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1737 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1740 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1743 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1746 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1749 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1752 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1756 * trace_dump_stack - record a stack back trace in the trace buffer
1757 * @skip: Number of functions to skip (helper handlers)
1759 void trace_dump_stack(int skip)
1761 unsigned long flags;
1763 if (tracing_disabled || tracing_selftest_running)
1766 local_save_flags(flags);
1769 * Skip 3 more, seems to get us at the caller of
1770 * this function.
1773 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1774 flags, skip, preempt_count(), NULL);
1777 static DEFINE_PER_CPU(int, user_stack_count);
1780 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1782 struct ftrace_event_call *call = &event_user_stack;
1783 struct ring_buffer_event *event;
1784 struct userstack_entry *entry;
1785 struct stack_trace trace;
1787 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1791 * NMIs can not handle page faults, even with fixups.
1792 * Saving the user stack can (and often does) fault.
1794 if (unlikely(in_nmi()))
1798 * prevent recursion, since the user stack tracing may
1799 * trigger other kernel events.
1802 if (__this_cpu_read(user_stack_count))
1805 __this_cpu_inc(user_stack_count);
1807 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1808 sizeof(*entry), flags, pc);
1810 goto out_drop_count;
1811 entry = ring_buffer_event_data(event);
1813 entry->tgid = current->tgid;
1814 memset(&entry->caller, 0, sizeof(entry->caller));
1816 trace.nr_entries = 0;
1817 trace.max_entries = FTRACE_STACK_ENTRIES;
1819 trace.entries = entry->caller;
1821 save_stack_trace_user(&trace);
1822 if (!filter_check_discard(call, entry, buffer, event))
1823 __buffer_unlock_commit(buffer, event);
1826 __this_cpu_dec(user_stack_count);
1832 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1834 ftrace_trace_userstack(tr, flags, preempt_count());
1838 #endif /* CONFIG_STACKTRACE */
1840 /* created for use with alloc_percpu */
1841 struct trace_buffer_struct {
1842 char buffer[TRACE_BUF_SIZE];
1845 static struct trace_buffer_struct *trace_percpu_buffer;
1846 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1847 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1848 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1851 * The buffer used is dependent on the context. There is a per cpu
1852 * buffer for normal context, softirq context, hard irq context and
1853 * for NMI context. This allows for lockless recording.
1855 * Note, if the buffers failed to be allocated, then this returns NULL
1857 static char *get_trace_buf(void)
1859 struct trace_buffer_struct *percpu_buffer;
1862 * If we have allocated per cpu buffers, then we do not
1863 * need to do any locking.
1866 percpu_buffer = trace_percpu_nmi_buffer;
1868 percpu_buffer = trace_percpu_irq_buffer;
1869 else if (in_softirq())
1870 percpu_buffer = trace_percpu_sirq_buffer;
1872 percpu_buffer = trace_percpu_buffer;
1877 return this_cpu_ptr(&percpu_buffer->buffer[0]);
1880 static int alloc_percpu_trace_buffer(void)
1882 struct trace_buffer_struct *buffers;
1883 struct trace_buffer_struct *sirq_buffers;
1884 struct trace_buffer_struct *irq_buffers;
1885 struct trace_buffer_struct *nmi_buffers;
1887 buffers = alloc_percpu(struct trace_buffer_struct);
1891 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1895 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1899 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1903 trace_percpu_buffer = buffers;
1904 trace_percpu_sirq_buffer = sirq_buffers;
1905 trace_percpu_irq_buffer = irq_buffers;
1906 trace_percpu_nmi_buffer = nmi_buffers;
1911 free_percpu(irq_buffers);
1913 free_percpu(sirq_buffers);
1915 free_percpu(buffers);
1917 WARN(1, "Could not allocate percpu trace_printk buffer");
1921 static int buffers_allocated;
1923 void trace_printk_init_buffers(void)
1925 if (buffers_allocated)
1928 if (alloc_percpu_trace_buffer())
1931 pr_info("ftrace: Allocated trace_printk buffers\n");
1933 /* Expand the buffers to set size */
1934 tracing_update_buffers();
1936 buffers_allocated = 1;
1939 * trace_printk_init_buffers() can be called by modules.
1940 * If that happens, then we need to start cmdline recording
1941 * directly here. If the global_trace.buffer is already
1942 * allocated here, then this was called by module code.
1944 if (global_trace.trace_buffer.buffer)
1945 tracing_start_cmdline_record();
1948 void trace_printk_start_comm(void)
1950 /* Start tracing comms if trace printk is set */
1951 if (!buffers_allocated)
1953 tracing_start_cmdline_record();
1956 static void trace_printk_start_stop_comm(int enabled)
1958 if (!buffers_allocated)
1962 tracing_start_cmdline_record();
1964 tracing_stop_cmdline_record();
1968 * trace_vbprintk - write binary msg to tracing buffer
1971 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1973 struct ftrace_event_call *call = &event_bprint;
1974 struct ring_buffer_event *event;
1975 struct ring_buffer *buffer;
1976 struct trace_array *tr = &global_trace;
1977 struct bprint_entry *entry;
1978 unsigned long flags;
1980 int len = 0, size, pc;
1982 if (unlikely(tracing_selftest_running || tracing_disabled))
1985 /* Don't pollute graph traces with trace_vprintk internals */
1986 pause_graph_tracing();
1988 pc = preempt_count();
1989 preempt_disable_notrace();
1991 tbuffer = get_trace_buf();
1997 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1999 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2002 local_save_flags(flags);
2003 size = sizeof(*entry) + sizeof(u32) * len;
2004 buffer = tr->trace_buffer.buffer;
2005 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2009 entry = ring_buffer_event_data(event);
2013 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2014 if (!filter_check_discard(call, entry, buffer, event)) {
2015 __buffer_unlock_commit(buffer, event);
2016 ftrace_trace_stack(buffer, flags, 6, pc);
2020 preempt_enable_notrace();
2021 unpause_graph_tracing();
2025 EXPORT_SYMBOL_GPL(trace_vbprintk);
2028 __trace_array_vprintk(struct ring_buffer *buffer,
2029 unsigned long ip, const char *fmt, va_list args)
2031 struct ftrace_event_call *call = &event_print;
2032 struct ring_buffer_event *event;
2033 int len = 0, size, pc;
2034 struct print_entry *entry;
2035 unsigned long flags;
2038 if (tracing_disabled || tracing_selftest_running)
2041 /* Don't pollute graph traces with trace_vprintk internals */
2042 pause_graph_tracing();
2044 pc = preempt_count();
2045 preempt_disable_notrace();
2048 tbuffer = get_trace_buf();
2054 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2055 if (len > TRACE_BUF_SIZE)
2058 local_save_flags(flags);
2059 size = sizeof(*entry) + len + 1;
2060 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2064 entry = ring_buffer_event_data(event);
2067 memcpy(&entry->buf, tbuffer, len);
2068 entry->buf[len] = '\0';
2069 if (!filter_check_discard(call, entry, buffer, event)) {
2070 __buffer_unlock_commit(buffer, event);
2071 ftrace_trace_stack(buffer, flags, 6, pc);
2074 preempt_enable_notrace();
2075 unpause_graph_tracing();
2080 int trace_array_vprintk(struct trace_array *tr,
2081 unsigned long ip, const char *fmt, va_list args)
2083 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2086 int trace_array_printk(struct trace_array *tr,
2087 unsigned long ip, const char *fmt, ...)
2092 if (!(trace_flags & TRACE_ITER_PRINTK))
2096 ret = trace_array_vprintk(tr, ip, fmt, ap);
2101 int trace_array_printk_buf(struct ring_buffer *buffer,
2102 unsigned long ip, const char *fmt, ...)
2107 if (!(trace_flags & TRACE_ITER_PRINTK))
2111 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2116 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2118 return trace_array_vprintk(&global_trace, ip, fmt, args);
2120 EXPORT_SYMBOL_GPL(trace_vprintk);
2122 static void trace_iterator_increment(struct trace_iterator *iter)
2124 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2128 ring_buffer_read(buf_iter, NULL);
2131 static struct trace_entry *
2132 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2133 unsigned long *lost_events)
2135 struct ring_buffer_event *event;
2136 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2139 event = ring_buffer_iter_peek(buf_iter, ts);
2141 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2145 iter->ent_size = ring_buffer_event_length(event);
2146 return ring_buffer_event_data(event);
2152 static struct trace_entry *
2153 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2154 unsigned long *missing_events, u64 *ent_ts)
2156 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2157 struct trace_entry *ent, *next = NULL;
2158 unsigned long lost_events = 0, next_lost = 0;
2159 int cpu_file = iter->cpu_file;
2160 u64 next_ts = 0, ts;
2166 * If we are in a per_cpu trace file, don't bother iterating over
2167 * all cpus; just peek at the one cpu directly.
2169 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2170 if (ring_buffer_empty_cpu(buffer, cpu_file))
2172 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2174 *ent_cpu = cpu_file;
2179 for_each_tracing_cpu(cpu) {
2181 if (ring_buffer_empty_cpu(buffer, cpu))
2184 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2187 * Pick the entry with the smallest timestamp:
2189 if (ent && (!next || ts < next_ts)) {
2193 next_lost = lost_events;
2194 next_size = iter->ent_size;
2198 iter->ent_size = next_size;
2201 *ent_cpu = next_cpu;
2207 *missing_events = next_lost;
2212 /* Find the next real entry, without updating the iterator itself */
2213 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2214 int *ent_cpu, u64 *ent_ts)
2216 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2219 /* Find the next real entry, and increment the iterator to the next entry */
2220 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2222 iter->ent = __find_next_entry(iter, &iter->cpu,
2223 &iter->lost_events, &iter->ts);
2226 trace_iterator_increment(iter);
2228 return iter->ent ? iter : NULL;
2231 static void trace_consume(struct trace_iterator *iter)
2233 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2234 &iter->lost_events);
2237 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2239 struct trace_iterator *iter = m->private;
2243 WARN_ON_ONCE(iter->leftover);
2247 /* can't go backwards */
2252 ent = trace_find_next_entry_inc(iter);
2256 while (ent && iter->idx < i)
2257 ent = trace_find_next_entry_inc(iter);
2264 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2266 struct ring_buffer_event *event;
2267 struct ring_buffer_iter *buf_iter;
2268 unsigned long entries = 0;
2271 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2273 buf_iter = trace_buffer_iter(iter, cpu);
2277 ring_buffer_iter_reset(buf_iter);
2280 * We could have the case with the max latency tracers
2281 * that a reset never took place on a cpu. This is evident
2282 * by the timestamp being before the start of the buffer.
2284 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2285 if (ts >= iter->trace_buffer->time_start)
2288 ring_buffer_read(buf_iter, NULL);
2291 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2295 * The current tracer is copied to avoid a global locking
2296 * all around.
2298 static void *s_start(struct seq_file *m, loff_t *pos)
2300 struct trace_iterator *iter = m->private;
2301 struct trace_array *tr = iter->tr;
2302 int cpu_file = iter->cpu_file;
2308 * copy the tracer to avoid using a global lock all around.
2309 * iter->trace is a copy of current_trace, the pointer to the
2310 * name may be used instead of a strcmp(), as iter->trace->name
2311 * will point to the same string as current_trace->name.
2313 mutex_lock(&trace_types_lock);
2314 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2315 *iter->trace = *tr->current_trace;
2316 mutex_unlock(&trace_types_lock);
2318 #ifdef CONFIG_TRACER_MAX_TRACE
2319 if (iter->snapshot && iter->trace->use_max_tr)
2320 return ERR_PTR(-EBUSY);
2323 if (!iter->snapshot)
2324 atomic_inc(&trace_record_cmdline_disabled);
2326 if (*pos != iter->pos) {
2331 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2332 for_each_tracing_cpu(cpu)
2333 tracing_iter_reset(iter, cpu);
2335 tracing_iter_reset(iter, cpu_file);
2338 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2343 * If we overflowed the seq_file before, then we want
2344 * to just reuse the trace_seq buffer again.
2350 p = s_next(m, p, &l);
2354 trace_event_read_lock();
2355 trace_access_lock(cpu_file);
2359 static void s_stop(struct seq_file *m, void *p)
2361 struct trace_iterator *iter = m->private;
2363 #ifdef CONFIG_TRACER_MAX_TRACE
2364 if (iter->snapshot && iter->trace->use_max_tr)
2368 if (!iter->snapshot)
2369 atomic_dec(&trace_record_cmdline_disabled);
2371 trace_access_unlock(iter->cpu_file);
2372 trace_event_read_unlock();
2376 get_total_entries(struct trace_buffer *buf,
2377 unsigned long *total, unsigned long *entries)
2379 unsigned long count;
2385 for_each_tracing_cpu(cpu) {
2386 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2388 * If this buffer has skipped entries, then we hold all
2389 * entries for the trace and we need to ignore the
2390 * ones before the time stamp.
2392 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2393 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2394 /* total is the same as the entries */
2398 ring_buffer_overrun_cpu(buf->buffer, cpu);
2403 static void print_lat_help_header(struct seq_file *m)
2405 seq_puts(m, "# _------=> CPU# \n");
2406 seq_puts(m, "# / _-----=> irqs-off \n");
2407 seq_puts(m, "# | / _----=> need-resched \n");
2408 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2409 seq_puts(m, "# ||| / _--=> preempt-depth \n");
2410 seq_puts(m, "# |||| / delay \n");
2411 seq_puts(m, "# cmd pid ||||| time | caller \n");
2412 seq_puts(m, "# \\ / ||||| \\ | / \n");
2415 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2417 unsigned long total;
2418 unsigned long entries;
2420 get_total_entries(buf, &total, &entries);
2421 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2422 entries, total, num_online_cpus());
2426 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2428 print_event_info(buf, m);
2429 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
2430 seq_puts(m, "# | | | | |\n");
2433 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2435 print_event_info(buf, m);
2436 seq_puts(m, "# _-----=> irqs-off\n");
2437 seq_puts(m, "# / _----=> need-resched\n");
2438 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2439 seq_puts(m, "# || / _--=> preempt-depth\n");
2440 seq_puts(m, "# ||| / delay\n");
2441 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2442 seq_puts(m, "# | | | |||| | |\n");
2446 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2448 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2449 struct trace_buffer *buf = iter->trace_buffer;
2450 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2451 struct tracer *type = iter->trace;
2452 unsigned long entries;
2453 unsigned long total;
2454 const char *name = "preemption";
2458 get_total_entries(buf, &total, &entries);
2460 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2462 seq_puts(m, "# -----------------------------------"
2463 "---------------------------------\n");
2464 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2465 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2466 nsecs_to_usecs(data->saved_latency),
2470 #if defined(CONFIG_PREEMPT_NONE)
2472 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2474 #elif defined(CONFIG_PREEMPT)
2479 /* These are reserved for later use */
2482 seq_printf(m, " #P:%d)\n", num_online_cpus());
2486 seq_puts(m, "# -----------------\n");
2487 seq_printf(m, "# | task: %.16s-%d "
2488 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2489 data->comm, data->pid,
2490 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2491 data->policy, data->rt_priority);
2492 seq_puts(m, "# -----------------\n");
2494 if (data->critical_start) {
2495 seq_puts(m, "# => started at: ");
2496 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2497 trace_print_seq(m, &iter->seq);
2498 seq_puts(m, "\n# => ended at: ");
2499 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2500 trace_print_seq(m, &iter->seq);
2501 seq_puts(m, "\n#\n");
2507 static void test_cpu_buff_start(struct trace_iterator *iter)
2509 struct trace_seq *s = &iter->seq;
2511 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2514 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2517 if (cpumask_test_cpu(iter->cpu, iter->started))
2520 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2523 cpumask_set_cpu(iter->cpu, iter->started);
2525 /* Don't print started cpu buffer for the first entry of the trace */
2527 trace_seq_printf(s, "##### CPU %u buffer started #####\n",
2531 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2533 struct trace_seq *s = &iter->seq;
2534 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2535 struct trace_entry *entry;
2536 struct trace_event *event;
2540 test_cpu_buff_start(iter);
2542 event = ftrace_find_event(entry->type);
2544 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2545 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2546 if (!trace_print_lat_context(iter))
2549 if (!trace_print_context(iter))
2555 return event->funcs->trace(iter, sym_flags, event);
2557 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2560 return TRACE_TYPE_HANDLED;
2562 return TRACE_TYPE_PARTIAL_LINE;
2565 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2567 struct trace_seq *s = &iter->seq;
2568 struct trace_entry *entry;
2569 struct trace_event *event;
2573 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2574 if (!trace_seq_printf(s, "%d %d %llu ",
2575 entry->pid, iter->cpu, iter->ts))
2579 event = ftrace_find_event(entry->type);
2581 return event->funcs->raw(iter, 0, event);
2583 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2586 return TRACE_TYPE_HANDLED;
2588 return TRACE_TYPE_PARTIAL_LINE;
2591 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2593 struct trace_seq *s = &iter->seq;
2594 unsigned char newline = '\n';
2595 struct trace_entry *entry;
2596 struct trace_event *event;
2600 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2601 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2602 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2603 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2606 event = ftrace_find_event(entry->type);
2608 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2609 if (ret != TRACE_TYPE_HANDLED)
2613 SEQ_PUT_FIELD_RET(s, newline);
2615 return TRACE_TYPE_HANDLED;
2618 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2620 struct trace_seq *s = &iter->seq;
2621 struct trace_entry *entry;
2622 struct trace_event *event;
2626 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2627 SEQ_PUT_FIELD_RET(s, entry->pid);
2628 SEQ_PUT_FIELD_RET(s, iter->cpu);
2629 SEQ_PUT_FIELD_RET(s, iter->ts);
2632 event = ftrace_find_event(entry->type);
2633 return event ? event->funcs->binary(iter, 0, event) :
2637 int trace_empty(struct trace_iterator *iter)
2639 struct ring_buffer_iter *buf_iter;
2642 /* If we are looking at one CPU buffer, only check that one */
2643 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2644 cpu = iter->cpu_file;
2645 buf_iter = trace_buffer_iter(iter, cpu);
2647 if (!ring_buffer_iter_empty(buf_iter))
2650 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2656 for_each_tracing_cpu(cpu) {
2657 buf_iter = trace_buffer_iter(iter, cpu);
2659 if (!ring_buffer_iter_empty(buf_iter))
2662 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2670 /* Called with trace_event_read_lock() held. */
2671 enum print_line_t print_trace_line(struct trace_iterator *iter)
2673 enum print_line_t ret;
2675 if (iter->lost_events &&
2676 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2677 iter->cpu, iter->lost_events))
2678 return TRACE_TYPE_PARTIAL_LINE;
2680 if (iter->trace && iter->trace->print_line) {
2681 ret = iter->trace->print_line(iter);
2682 if (ret != TRACE_TYPE_UNHANDLED)
2686 if (iter->ent->type == TRACE_BPUTS &&
2687 trace_flags & TRACE_ITER_PRINTK &&
2688 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2689 return trace_print_bputs_msg_only(iter);
2691 if (iter->ent->type == TRACE_BPRINT &&
2692 trace_flags & TRACE_ITER_PRINTK &&
2693 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2694 return trace_print_bprintk_msg_only(iter);
2696 if (iter->ent->type == TRACE_PRINT &&
2697 trace_flags & TRACE_ITER_PRINTK &&
2698 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2699 return trace_print_printk_msg_only(iter);
2701 if (trace_flags & TRACE_ITER_BIN)
2702 return print_bin_fmt(iter);
2704 if (trace_flags & TRACE_ITER_HEX)
2705 return print_hex_fmt(iter);
2707 if (trace_flags & TRACE_ITER_RAW)
2708 return print_raw_fmt(iter);
2710 return print_trace_fmt(iter);
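/*
 * Dispatch order in print_trace_line() above: a tracer's own
 * print_line() gets first refusal, then the PRINTK_MSGONLY special
 * cases, then the mutually exclusive output formats in precedence
 * order bin > hex > raw, finally falling back to the default
 * human-readable format.
 */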
2713 void trace_latency_header(struct seq_file *m)
2715 struct trace_iterator *iter = m->private;
2717 /* print nothing if the buffers are empty */
2718 if (trace_empty(iter))
2721 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2722 print_trace_header(m, iter);
2724 if (!(trace_flags & TRACE_ITER_VERBOSE))
2725 print_lat_help_header(m);
2728 void trace_default_header(struct seq_file *m)
2730 struct trace_iterator *iter = m->private;
2732 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2735 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2736 /* print nothing if the buffers are empty */
2737 if (trace_empty(iter))
2739 print_trace_header(m, iter);
2740 if (!(trace_flags & TRACE_ITER_VERBOSE))
2741 print_lat_help_header(m);
2743 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2744 if (trace_flags & TRACE_ITER_IRQ_INFO)
2745 print_func_help_header_irq(iter->trace_buffer, m);
2747 print_func_help_header(iter->trace_buffer, m);
2752 static void test_ftrace_alive(struct seq_file *m)
2754 if (!ftrace_is_dead())
2756 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2757 seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2760 #ifdef CONFIG_TRACER_MAX_TRACE
2761 static void show_snapshot_main_help(struct seq_file *m)
2763 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2764 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2765 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2766 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2767 seq_printf(m, "# (Doesn't have to be '2', works with any number that\n");
2768 seq_printf(m, "# is not a '0' or '1')\n");
2771 static void show_snapshot_percpu_help(struct seq_file *m)
2773 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2774 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2775 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2776 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2778 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2779 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2781 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2782 seq_printf(m, "# (Doesn't have to be '2', works with any number that\n");
2783 seq_printf(m, "# is not a '0' or '1')\n");
2786 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2788 if (iter->tr->allocated_snapshot)
2789 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2791 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2793 seq_printf(m, "# Snapshot commands:\n");
2794 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2795 show_snapshot_main_help(m);
2797 show_snapshot_percpu_help(m);
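/*
 * A typical snapshot round trip from userspace, as a sketch (assumes
 * debugfs mounted at /sys/kernel/debug, per the help text above):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate and take one
 *   cat /sys/kernel/debug/tracing/snapshot        # read the frozen copy
 *   echo 0 > /sys/kernel/debug/tracing/snapshot   # clear and free it
 */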
2800 /* Should never be called */
2801 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2804 static int s_show(struct seq_file *m, void *v)
2806 struct trace_iterator *iter = v;
2809 if (iter->ent == NULL) {
2811 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2813 test_ftrace_alive(m);
2815 if (iter->snapshot && trace_empty(iter))
2816 print_snapshot_help(m, iter);
2817 else if (iter->trace && iter->trace->print_header)
2818 iter->trace->print_header(m);
2820 trace_default_header(m);
2822 } else if (iter->leftover) {
2824 * If we filled the seq_file buffer earlier, we
2825 * want to just show it now.
2827 ret = trace_print_seq(m, &iter->seq);
2829 /* ret should this time be zero, but you never know */
2830 iter->leftover = ret;
2833 print_trace_line(iter);
2834 ret = trace_print_seq(m, &iter->seq);
2836 * If we overflow the seq_file buffer, then it will
2837 * ask us for this data again at start up.
2839 * ret is 0 if seq_file write succeeded.
2842 iter->leftover = ret;
2848 static const struct seq_operations tracer_seq_ops = {
2855 static struct trace_iterator *
2856 __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2857 struct inode *inode, struct file *file, bool snapshot)
2859 struct trace_iterator *iter;
2862 if (tracing_disabled)
2863 return ERR_PTR(-ENODEV);
2865 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2867 return ERR_PTR(-ENOMEM);
2869 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2871 if (!iter->buffer_iter)
2875 * We make a copy of the current tracer to avoid concurrent
2876 * changes on it while we are reading.
2878 mutex_lock(&trace_types_lock);
2879 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2883 *iter->trace = *tr->current_trace;
2885 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2890 #ifdef CONFIG_TRACER_MAX_TRACE
2891 /* Currently only the top directory has a snapshot */
2892 if (tr->current_trace->print_max || snapshot)
2893 iter->trace_buffer = &tr->max_buffer;
2896 iter->trace_buffer = &tr->trace_buffer;
2897 iter->snapshot = snapshot;
2899 mutex_init(&iter->mutex);
2900 iter->cpu_file = tc->cpu;
2902 /* Notify the tracer early, before we stop tracing. */
2903 if (iter->trace && iter->trace->open)
2904 iter->trace->open(iter);
2906 /* Annotate start of buffers if we had overruns */
2907 if (ring_buffer_overruns(iter->trace_buffer->buffer))
2908 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2910 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2911 if (trace_clocks[trace_clock_id].in_ns)
2912 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2914 /* stop the trace while dumping if we are not opening "snapshot" */
2915 if (!iter->snapshot)
2916 tracing_stop_tr(tr);
2918 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2919 for_each_tracing_cpu(cpu) {
2920 iter->buffer_iter[cpu] =
2921 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2923 ring_buffer_read_prepare_sync();
2924 for_each_tracing_cpu(cpu) {
2925 ring_buffer_read_start(iter->buffer_iter[cpu]);
2926 tracing_iter_reset(iter, cpu);
2929 cpu = iter->cpu_file;
2930 iter->buffer_iter[cpu] =
2931 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2932 ring_buffer_read_prepare_sync();
2933 ring_buffer_read_start(iter->buffer_iter[cpu]);
2934 tracing_iter_reset(iter, cpu);
2937 mutex_unlock(&trace_types_lock);
2942 mutex_unlock(&trace_types_lock);
2944 kfree(iter->buffer_iter);
2946 seq_release_private(inode, file);
2947 return ERR_PTR(-ENOMEM);
2950 int tracing_open_generic(struct inode *inode, struct file *filp)
2952 if (tracing_disabled)
2955 filp->private_data = inode->i_private;
2960 * Open and update trace_array ref count.
2961 * Must have the current trace_array passed to it.
2963 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2965 struct trace_array *tr = inode->i_private;
2967 if (tracing_disabled)
2970 if (trace_array_get(tr) < 0)
2973 filp->private_data = inode->i_private;
2979 int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2981 struct trace_cpu *tc = inode->i_private;
2982 struct trace_array *tr = tc->tr;
2984 if (tracing_disabled)
2987 if (trace_array_get(tr) < 0)
2990 filp->private_data = inode->i_private;
2996 static int tracing_release(struct inode *inode, struct file *file)
2998 struct seq_file *m = file->private_data;
2999 struct trace_iterator *iter;
3000 struct trace_array *tr;
3003 /* Writes do not use seq_file; we need to grab tr from the inode */
3004 if (!(file->f_mode & FMODE_READ)) {
3005 struct trace_cpu *tc = inode->i_private;
3007 trace_array_put(tc->tr);
3013 trace_array_put(tr);
3015 mutex_lock(&trace_types_lock);
3017 for_each_tracing_cpu(cpu) {
3018 if (iter->buffer_iter[cpu])
3019 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3022 if (iter->trace && iter->trace->close)
3023 iter->trace->close(iter);
3025 if (!iter->snapshot)
3026 /* reenable tracing if it was previously enabled */
3027 tracing_start_tr(tr);
3028 mutex_unlock(&trace_types_lock);
3030 mutex_destroy(&iter->mutex);
3031 free_cpumask_var(iter->started);
3033 kfree(iter->buffer_iter);
3034 seq_release_private(inode, file);
3039 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3041 struct trace_array *tr = inode->i_private;
3043 trace_array_put(tr);
3047 static int tracing_release_generic_tc(struct inode *inode, struct file *file)
3049 struct trace_cpu *tc = inode->i_private;
3050 struct trace_array *tr = tc->tr;
3052 trace_array_put(tr);
3056 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3058 struct trace_array *tr = inode->i_private;
3060 trace_array_put(tr);
3062 return single_release(inode, file);
3065 static int tracing_open(struct inode *inode, struct file *file)
3067 struct trace_cpu *tc = inode->i_private;
3068 struct trace_array *tr = tc->tr;
3069 struct trace_iterator *iter;
3072 if (trace_array_get(tr) < 0)
3075 /* If this file was open for write, then erase contents */
3076 if ((file->f_mode & FMODE_WRITE) &&
3077 (file->f_flags & O_TRUNC)) {
3078 if (tc->cpu == RING_BUFFER_ALL_CPUS)
3079 tracing_reset_online_cpus(&tr->trace_buffer);
3081 tracing_reset(&tr->trace_buffer, tc->cpu);
3084 if (file->f_mode & FMODE_READ) {
3085 iter = __tracing_open(tr, tc, inode, file, false);
3087 ret = PTR_ERR(iter);
3088 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3089 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3093 trace_array_put(tr);
3099 t_next(struct seq_file *m, void *v, loff_t *pos)
3101 struct tracer *t = v;
3111 static void *t_start(struct seq_file *m, loff_t *pos)
3116 mutex_lock(&trace_types_lock);
3117 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
3123 static void t_stop(struct seq_file *m, void *p)
3125 mutex_unlock(&trace_types_lock);
3128 static int t_show(struct seq_file *m, void *v)
3130 struct tracer *t = v;
3135 seq_printf(m, "%s", t->name);
3144 static const struct seq_operations show_traces_seq_ops = {
3151 static int show_traces_open(struct inode *inode, struct file *file)
3153 if (tracing_disabled)
3156 return seq_open(file, &show_traces_seq_ops);
3160 tracing_write_stub(struct file *filp, const char __user *ubuf,
3161 size_t count, loff_t *ppos)
3166 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3168 if (file->f_mode & FMODE_READ)
3169 return seq_lseek(file, offset, origin);
3174 static const struct file_operations tracing_fops = {
3175 .open = tracing_open,
3177 .write = tracing_write_stub,
3178 .llseek = tracing_seek,
3179 .release = tracing_release,
3182 static const struct file_operations show_traces_fops = {
3183 .open = show_traces_open,
3185 .release = seq_release,
3186 .llseek = seq_lseek,
3190 * Only trace on a CPU if the bitmask is set:
3192 static cpumask_var_t tracing_cpumask;
3195 * The tracer itself will not take this lock, but still we want
3196 * to provide a consistent cpumask to user-space:
3198 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3201 * Temporary storage for the character representation of the
3202 * CPU bitmask (and one more byte for the newline):
3204 static char mask_str[NR_CPUS + 1];
3207 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3208 size_t count, loff_t *ppos)
3212 mutex_lock(&tracing_cpumask_update_lock);
3214 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
3215 if (count - len < 2) {
3219 len += sprintf(mask_str + len, "\n");
3220 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3223 mutex_unlock(&tracing_cpumask_update_lock);
3229 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3230 size_t count, loff_t *ppos)
3232 struct trace_array *tr = filp->private_data;
3233 cpumask_var_t tracing_cpumask_new;
3236 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3239 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3243 mutex_lock(&tracing_cpumask_update_lock);
3245 local_irq_disable();
3246 arch_spin_lock(&ftrace_max_lock);
3247 for_each_tracing_cpu(cpu) {
3249 * Increase/decrease the disabled counter if we are
3250 * about to flip a bit in the cpumask:
3252 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3253 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3254 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3255 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3257 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3258 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3259 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3260 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3263 arch_spin_unlock(&ftrace_max_lock);
3266 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
3268 mutex_unlock(&tracing_cpumask_update_lock);
3269 free_cpumask_var(tracing_cpumask_new);
3274 free_cpumask_var(tracing_cpumask_new);
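/*
 * Userspace sketch: tracing_cpumask takes a hex cpumask (parsed by
 * cpumask_parse_user() above), e.g. to trace only CPUs 0 and 1:
 *
 *   echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */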
3279 static const struct file_operations tracing_cpumask_fops = {
3280 .open = tracing_open_generic,
3281 .read = tracing_cpumask_read,
3282 .write = tracing_cpumask_write,
3283 .llseek = generic_file_llseek,
3286 static int tracing_trace_options_show(struct seq_file *m, void *v)
3288 struct tracer_opt *trace_opts;
3289 struct trace_array *tr = m->private;
3293 mutex_lock(&trace_types_lock);
3294 tracer_flags = tr->current_trace->flags->val;
3295 trace_opts = tr->current_trace->flags->opts;
3297 for (i = 0; trace_options[i]; i++) {
3298 if (trace_flags & (1 << i))
3299 seq_printf(m, "%s\n", trace_options[i]);
3301 seq_printf(m, "no%s\n", trace_options[i]);
3304 for (i = 0; trace_opts[i].name; i++) {
3305 if (tracer_flags & trace_opts[i].bit)
3306 seq_printf(m, "%s\n", trace_opts[i].name);
3308 seq_printf(m, "no%s\n", trace_opts[i].name);
3310 mutex_unlock(&trace_types_lock);
3315 static int __set_tracer_option(struct tracer *trace,
3316 struct tracer_flags *tracer_flags,
3317 struct tracer_opt *opts, int neg)
3321 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3326 tracer_flags->val &= ~opts->bit;
3328 tracer_flags->val |= opts->bit;
3332 /* Try to assign a tracer-specific option */
3333 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3335 struct tracer_flags *tracer_flags = trace->flags;
3336 struct tracer_opt *opts = NULL;
3339 for (i = 0; tracer_flags->opts[i].name; i++) {
3340 opts = &tracer_flags->opts[i];
3342 if (strcmp(cmp, opts->name) == 0)
3343 return __set_tracer_option(trace, trace->flags,
3350 /* Some tracers require overwrite to stay enabled */
3351 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3353 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3359 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3361 /* do nothing if flag is already set */
3362 if (!!(trace_flags & mask) == !!enabled)
3365 /* Give the tracer a chance to approve the change */
3366 if (tr->current_trace->flag_changed)
3367 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
3371 trace_flags |= mask;
3373 trace_flags &= ~mask;
3375 if (mask == TRACE_ITER_RECORD_CMD)
3376 trace_event_enable_cmd_record(enabled);
3378 if (mask == TRACE_ITER_OVERWRITE) {
3379 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3380 #ifdef CONFIG_TRACER_MAX_TRACE
3381 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3385 if (mask == TRACE_ITER_PRINTK)
3386 trace_printk_start_stop_comm(enabled);
3391 static int trace_set_options(struct trace_array *tr, char *option)
3398 cmp = strstrip(option);
3400 if (strncmp(cmp, "no", 2) == 0) {
3405 mutex_lock(&trace_types_lock);
3407 for (i = 0; trace_options[i]; i++) {
3408 if (strcmp(cmp, trace_options[i]) == 0) {
3409 ret = set_tracer_flag(tr, 1 << i, !neg);
3414 /* If no option could be set, test the specific tracer options */
3415 if (!trace_options[i])
3416 ret = set_tracer_option(tr->current_trace, cmp, neg);
3418 mutex_unlock(&trace_types_lock);
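/*
 * Userspace sketch: options are set by name and cleared with a "no"
 * prefix, matching the strncmp(cmp, "no", 2) check above:
 *
 *   echo overwrite > /sys/kernel/debug/tracing/trace_options
 *   echo nooverwrite > /sys/kernel/debug/tracing/trace_options
 */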
3424 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3425 size_t cnt, loff_t *ppos)
3427 struct seq_file *m = filp->private_data;
3428 struct trace_array *tr = m->private;
3432 if (cnt >= sizeof(buf))
3435 if (copy_from_user(&buf, ubuf, cnt))
3440 ret = trace_set_options(tr, buf);
3449 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3451 struct trace_array *tr = inode->i_private;
3453 if (tracing_disabled)
3456 if (trace_array_get(tr) < 0)
3459 return single_open(file, tracing_trace_options_show, inode->i_private);
3462 static const struct file_operations tracing_iter_fops = {
3463 .open = tracing_trace_options_open,
3465 .llseek = seq_lseek,
3466 .release = tracing_single_release_tr,
3467 .write = tracing_trace_options_write,
3470 static const char readme_msg[] =
3471 "tracing mini-HOWTO:\n\n"
3472 "# echo 0 > tracing_on : quick way to disable tracing\n"
3473 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3474 " Important files:\n"
3475 " trace\t\t\t- The static contents of the buffer\n"
3476 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3477 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3478 " current_tracer\t- function and latency tracers\n"
3479 " available_tracers\t- list of configured tracers for current_tracer\n"
3480 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3481 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3482 " trace_clock\t\t-change the clock used to order events\n"
3483 " local: Per cpu clock but may not be synced across CPUs\n"
3484 " global: Synced across CPUs but slows tracing down.\n"
3485 " counter: Not a clock, but just an increment\n"
3486 " uptime: Jiffy counter from time of boot\n"
3487 " perf: Same clock that perf events use\n"
3488 #ifdef CONFIG_X86_64
3489 " x86-tsc: TSC cycle counter\n"
3491 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3492 " tracing_cpumask\t- Limit which CPUs to trace\n"
3493 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3494 "\t\t\t Remove sub-buffer with rmdir\n"
3495 " trace_options\t\t- Set format or modify how tracing happens\n"
3496 "\t\t\t Disable an option by adding a suffix 'no' to the option name\n"
3497 #ifdef CONFIG_DYNAMIC_FTRACE
3498 "\n available_filter_functions - list of functions that can be filtered on\n"
3499 " set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3500 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3501 " modules: Can select a group via module\n"
3502 " Format: :mod:<module-name>\n"
3503 " example: echo :mod:ext3 > set_ftrace_filter\n"
3504 " triggers: a command to perform when function is hit\n"
3505 " Format: <function>:<trigger>[:count]\n"
3506 " trigger: traceon, traceoff\n"
3507 " enable_event:<system>:<event>\n"
3508 " disable_event:<system>:<event>\n"
3509 #ifdef CONFIG_STACKTRACE
3512 #ifdef CONFIG_TRACER_SNAPSHOT
3515 " example: echo do_fault:traceoff > set_ftrace_filter\n"
3516 " echo do_trap:traceoff:3 > set_ftrace_filter\n"
3517 " The first one will disable tracing every time do_fault is hit\n"
3518 " The second will disable tracing at most 3 times when do_trap is hit\n"
3519 " The first time do trap is hit and it disables tracing, the counter\n"
3520 " will decrement to 2. If tracing is already disabled, the counter\n"
3521 " will not decrement. It only decrements when the trigger did work\n"
3522 " To remove trigger without count:\n"
3523 " echo '!<function>:<trigger> > set_ftrace_filter\n"
3524 " To remove trigger with a count:\n"
3525 " echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3526 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3527 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3528 " modules: Can select a group via module command :mod:\n"
3529 " Does not accept triggers\n"
3530 #endif /* CONFIG_DYNAMIC_FTRACE */
3531 #ifdef CONFIG_FUNCTION_TRACER
3532 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3534 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3535 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3536 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3538 #ifdef CONFIG_TRACER_SNAPSHOT
3539 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3540 "\t\t\t Read the contents for more information\n"
3542 #ifdef CONFIG_STACKTRACE
3543 " stack_trace\t\t- Shows the max stack trace when active\n"
3544 " stack_max_size\t- Shows current max stack size that was traced\n"
3545 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
3546 #ifdef CONFIG_DYNAMIC_FTRACE
3547 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3549 #endif /* CONFIG_STACKTRACE */
3553 tracing_readme_read(struct file *filp, char __user *ubuf,
3554 size_t cnt, loff_t *ppos)
3556 return simple_read_from_buffer(ubuf, cnt, ppos,
3557 readme_msg, strlen(readme_msg));
3560 static const struct file_operations tracing_readme_fops = {
3561 .open = tracing_open_generic,
3562 .read = tracing_readme_read,
3563 .llseek = generic_file_llseek,
3567 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3568 size_t cnt, loff_t *ppos)
3577 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3581 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3589 for (i = 0; i < SAVED_CMDLINES; i++) {
3592 pid = map_cmdline_to_pid[i];
3593 if (pid == -1 || pid == NO_CMDLINE_MAP)
3596 trace_find_cmdline(pid, buf_comm);
3597 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3602 len = simple_read_from_buffer(ubuf, cnt, ppos,
3611 static const struct file_operations tracing_saved_cmdlines_fops = {
3612 .open = tracing_open_generic,
3613 .read = tracing_saved_cmdlines_read,
3614 .llseek = generic_file_llseek,
3618 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3619 size_t cnt, loff_t *ppos)
3621 struct trace_array *tr = filp->private_data;
3622 char buf[MAX_TRACER_SIZE+2];
3625 mutex_lock(&trace_types_lock);
3626 r = sprintf(buf, "%s\n", tr->current_trace->name);
3627 mutex_unlock(&trace_types_lock);
3629 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3632 int tracer_init(struct tracer *t, struct trace_array *tr)
3634 tracing_reset_online_cpus(&tr->trace_buffer);
3638 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3642 for_each_tracing_cpu(cpu)
3643 per_cpu_ptr(buf->data, cpu)->entries = val;
3646 #ifdef CONFIG_TRACER_MAX_TRACE
3647 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3648 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3649 struct trace_buffer *size_buf, int cpu_id)
3653 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3654 for_each_tracing_cpu(cpu) {
3655 ret = ring_buffer_resize(trace_buf->buffer,
3656 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3659 per_cpu_ptr(trace_buf->data, cpu)->entries =
3660 per_cpu_ptr(size_buf->data, cpu)->entries;
3663 ret = ring_buffer_resize(trace_buf->buffer,
3664 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3666 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3667 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3672 #endif /* CONFIG_TRACER_MAX_TRACE */
3674 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3675 unsigned long size, int cpu)
3680 * If kernel or user changes the size of the ring buffer
3681 * we use the size that was given, and we can forget about
3682 * expanding it later.
3684 ring_buffer_expanded = true;
3686 /* May be called before buffers are initialized */
3687 if (!tr->trace_buffer.buffer)
3690 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3694 #ifdef CONFIG_TRACER_MAX_TRACE
3695 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3696 !tr->current_trace->use_max_tr)
3699 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3701 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3702 &tr->trace_buffer, cpu);
3705 * AARGH! We are left with a different
3706 * size max buffer!!!!
3707 * The max buffer is our "snapshot" buffer.
3708 * When a tracer needs a snapshot (one of the
3709 * latency tracers), it swaps the max buffer
3710 * with the saved snapshot. We managed to
3711 * update the size of the main buffer, but failed to
3712 * update the size of the max buffer. But when we tried
3713 * to reset the main buffer to the original size, we
3714 * failed there too. This is very unlikely to
3715 * happen, but if it does, warn and kill all tracing.
3719 tracing_disabled = 1;
3724 if (cpu == RING_BUFFER_ALL_CPUS)
3725 set_buffer_entries(&tr->max_buffer, size);
3727 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3730 #endif /* CONFIG_TRACER_MAX_TRACE */
3732 if (cpu == RING_BUFFER_ALL_CPUS)
3733 set_buffer_entries(&tr->trace_buffer, size);
3735 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3740 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3741 unsigned long size, int cpu_id)
3745 mutex_lock(&trace_types_lock);
3747 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3748 /* make sure this cpu is enabled in the mask */
3749 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3755 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3760 mutex_unlock(&trace_types_lock);
3767 * tracing_update_buffers - used by tracing facility to expand ring buffers
3769 * To save memory on systems where tracing is compiled in but never used,
3770 * the ring buffers start at a minimum size. Once a user starts to use
3771 * the tracing facility, the buffers need to grow to their default size.
3774 * This function is to be called when a tracer is about to be used.
3776 int tracing_update_buffers(void)
3780 mutex_lock(&trace_types_lock);
3781 if (!ring_buffer_expanded)
3782 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3783 RING_BUFFER_ALL_CPUS);
3784 mutex_unlock(&trace_types_lock);
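/*
 * Calling sketch, mirroring e.g. tracing_snapshot_write() below:
 *
 *   ret = tracing_update_buffers();
 *   if (ret < 0)
 *           return ret;
 */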
3789 struct trace_option_dentry;
3791 static struct trace_option_dentry *
3792 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3795 destroy_trace_option_files(struct trace_option_dentry *topts);
3797 static int tracing_set_tracer(const char *buf)
3799 static struct trace_option_dentry *topts;
3800 struct trace_array *tr = &global_trace;
3802 #ifdef CONFIG_TRACER_MAX_TRACE
3807 mutex_lock(&trace_types_lock);
3809 if (!ring_buffer_expanded) {
3810 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3811 RING_BUFFER_ALL_CPUS);
3817 for (t = trace_types; t; t = t->next) {
3818 if (strcmp(t->name, buf) == 0)
3825 if (t == tr->current_trace)
3828 trace_branch_disable();
3830 tr->current_trace->enabled = false;
3832 if (tr->current_trace->reset)
3833 tr->current_trace->reset(tr);
3835 /* Current trace needs to be nop_trace before synchronize_sched */
3836 tr->current_trace = &nop_trace;
3838 #ifdef CONFIG_TRACER_MAX_TRACE
3839 had_max_tr = tr->allocated_snapshot;
3841 if (had_max_tr && !t->use_max_tr) {
3843 * We need to make sure that update_max_tr sees that
3844 * current_trace changed to nop_trace, to keep it from
3845 * swapping the buffers after we resize it.
3846 * update_max_tr is called with interrupts disabled,
3847 * so a synchronize_sched() is sufficient.
3849 synchronize_sched();
3853 destroy_trace_option_files(topts);
3855 topts = create_trace_option_files(tr, t);
3857 #ifdef CONFIG_TRACER_MAX_TRACE
3858 if (t->use_max_tr && !had_max_tr) {
3859 ret = alloc_snapshot(tr);
3866 ret = tracer_init(t, tr);
3871 tr->current_trace = t;
3872 tr->current_trace->enabled = true;
3873 trace_branch_enable(tr);
3875 mutex_unlock(&trace_types_lock);
3881 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3884 char buf[MAX_TRACER_SIZE+1];
3891 if (cnt > MAX_TRACER_SIZE)
3892 cnt = MAX_TRACER_SIZE;
3894 if (copy_from_user(&buf, ubuf, cnt))
3899 /* strip trailing whitespace. */
3900 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3903 err = tracing_set_tracer(buf);
3913 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3914 size_t cnt, loff_t *ppos)
3916 unsigned long *ptr = filp->private_data;
3920 r = snprintf(buf, sizeof(buf), "%ld\n",
3921 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3922 if (r > sizeof(buf))
3924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
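/*
 * Userspace sketch (assumes the file is exposed as tracing_max_latency):
 * the latency files read and write microseconds, converted by
 * nsecs_to_usecs() above, with -1 meaning unset:
 *
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 *   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */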
3928 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3929 size_t cnt, loff_t *ppos)
3931 unsigned long *ptr = filp->private_data;
3935 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3944 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3946 struct trace_cpu *tc = inode->i_private;
3947 struct trace_array *tr = tc->tr;
3948 struct trace_iterator *iter;
3951 if (tracing_disabled)
3954 if (trace_array_get(tr) < 0)
3957 mutex_lock(&trace_types_lock);
3959 /* create a buffer to store the information to pass to userspace */
3960 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3967 * We make a copy of the current tracer to avoid concurrent
3968 * changes on it while we are reading.
3970 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3975 *iter->trace = *tr->current_trace;
3977 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3982 /* trace pipe does not show start of buffer */
3983 cpumask_setall(iter->started);
3985 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3986 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3988 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3989 if (trace_clocks[trace_clock_id].in_ns)
3990 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3992 iter->cpu_file = tc->cpu;
3994 iter->trace_buffer = &tc->tr->trace_buffer;
3995 mutex_init(&iter->mutex);
3996 filp->private_data = iter;
3998 if (iter->trace->pipe_open)
3999 iter->trace->pipe_open(iter);
4001 nonseekable_open(inode, filp);
4003 mutex_unlock(&trace_types_lock);
4009 __trace_array_put(tr);
4010 mutex_unlock(&trace_types_lock);
4014 static int tracing_release_pipe(struct inode *inode, struct file *file)
4016 struct trace_iterator *iter = file->private_data;
4017 struct trace_cpu *tc = inode->i_private;
4018 struct trace_array *tr = tc->tr;
4020 mutex_lock(&trace_types_lock);
4022 if (iter->trace->pipe_close)
4023 iter->trace->pipe_close(iter);
4025 mutex_unlock(&trace_types_lock);
4027 free_cpumask_var(iter->started);
4028 mutex_destroy(&iter->mutex);
4032 trace_array_put(tr);
4038 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4040 /* Iterators are static; they should be either filled or empty */
4041 if (trace_buffer_iter(iter, iter->cpu_file))
4042 return POLLIN | POLLRDNORM;
4044 if (trace_flags & TRACE_ITER_BLOCK)
4046 * Always select as readable when in blocking mode
4048 return POLLIN | POLLRDNORM;
4050 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4055 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4057 struct trace_iterator *iter = filp->private_data;
4059 return trace_poll(iter, filp, poll_table);
4063 * This is a make-shift waitqueue.
4064 * A tracer might use this callback in some rare cases:
4066 * 1) the current tracer might hold the runqueue lock when it wakes up
4067 * a reader, hence a deadlock (sched, function, and function graph tracers)
4068 * 2) the function tracers trace all functions, and we don't want
4069 * the overhead of calling wake_up and friends
4070 * (and of tracing them too)
4072 * Anyway, this is a very primitive wakeup.
4074 void poll_wait_pipe(struct trace_iterator *iter)
4076 set_current_state(TASK_INTERRUPTIBLE);
4077 /* sleep for 100 msecs, and try again. */
4078 schedule_timeout(HZ / 10);
4081 /* Must be called with trace_types_lock mutex held. */
4082 static int tracing_wait_pipe(struct file *filp)
4084 struct trace_iterator *iter = filp->private_data;
4086 while (trace_empty(iter)) {
4088 if ((filp->f_flags & O_NONBLOCK)) {
4092 mutex_unlock(&iter->mutex);
4094 iter->trace->wait_pipe(iter);
4096 mutex_lock(&iter->mutex);
4098 if (signal_pending(current))
4102 * We block until we read something and tracing is disabled.
4103 * We still block if tracing is disabled, but we have never
4104 * read anything. This allows a user to cat this file, and
4105 * then enable tracing. But after we have read something,
4106 * we give an EOF when tracing is again disabled.
4108 * iter->pos will be 0 if we haven't read anything.
4110 if (!tracing_is_on() && iter->pos)
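/*
 * Userspace view of the above, as a sketch: "cat trace_pipe" blocks
 * while the buffer is empty, and only sees EOF once tracing has been
 * disabled *and* at least one entry was already read (iter->pos != 0).
 */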
4121 tracing_read_pipe(struct file *filp, char __user *ubuf,
4122 size_t cnt, loff_t *ppos)
4124 struct trace_iterator *iter = filp->private_data;
4125 struct trace_array *tr = iter->tr;
4128 /* return any leftover data */
4129 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4133 trace_seq_init(&iter->seq);
4135 /* copy the tracer to avoid using a global lock all around */
4136 mutex_lock(&trace_types_lock);
4137 if (unlikely(iter->trace->name != tr->current_trace->name))
4138 *iter->trace = *tr->current_trace;
4139 mutex_unlock(&trace_types_lock);
4142 * Avoid more than one consumer on a single file descriptor.
4143 * This is just a matter of trace coherency; the ring buffer itself is protected.
4146 mutex_lock(&iter->mutex);
4147 if (iter->trace->read) {
4148 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4154 sret = tracing_wait_pipe(filp);
4158 /* stop when tracing is finished */
4159 if (trace_empty(iter)) {
4164 if (cnt >= PAGE_SIZE)
4165 cnt = PAGE_SIZE - 1;
4167 /* reset all but tr, trace, and overruns */
4168 memset(&iter->seq, 0,
4169 sizeof(struct trace_iterator) -
4170 offsetof(struct trace_iterator, seq));
4173 trace_event_read_lock();
4174 trace_access_lock(iter->cpu_file);
4175 while (trace_find_next_entry_inc(iter) != NULL) {
4176 enum print_line_t ret;
4177 int len = iter->seq.len;
4179 ret = print_trace_line(iter);
4180 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4181 /* don't print partial lines */
4182 iter->seq.len = len;
4185 if (ret != TRACE_TYPE_NO_CONSUME)
4186 trace_consume(iter);
4188 if (iter->seq.len >= cnt)
4192 * Setting the full flag means we reached the trace_seq buffer
4193 * size, so we should have left via the partial-output condition
4194 * above; one of the trace_seq_* functions is not being used properly.
4196 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4199 trace_access_unlock(iter->cpu_file);
4200 trace_event_read_unlock();
4202 /* Now copy what we have to the user */
4203 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4204 if (iter->seq.readpos >= iter->seq.len)
4205 trace_seq_init(&iter->seq);
4208 * If there was nothing to send to the user, in spite of consuming trace
4209 * entries, go back to wait for more entries.
4215 mutex_unlock(&iter->mutex);
4220 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4221 struct pipe_buffer *buf)
4223 __free_page(buf->page);
4226 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4229 __free_page(spd->pages[idx]);
4232 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4234 .map = generic_pipe_buf_map,
4235 .unmap = generic_pipe_buf_unmap,
4236 .confirm = generic_pipe_buf_confirm,
4237 .release = tracing_pipe_buf_release,
4238 .steal = generic_pipe_buf_steal,
4239 .get = generic_pipe_buf_get,
4243 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4248 /* Seq buffer is page-sized, exactly what we need. */
4250 count = iter->seq.len;
4251 ret = print_trace_line(iter);
4252 count = iter->seq.len - count;
4255 iter->seq.len -= count;
4258 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4259 iter->seq.len -= count;
4263 if (ret != TRACE_TYPE_NO_CONSUME)
4264 trace_consume(iter);
4266 if (!trace_find_next_entry_inc(iter)) {
4276 static ssize_t tracing_splice_read_pipe(struct file *filp,
4278 struct pipe_inode_info *pipe,
4282 struct page *pages_def[PIPE_DEF_BUFFERS];
4283 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4284 struct trace_iterator *iter = filp->private_data;
4285 struct splice_pipe_desc spd = {
4287 .partial = partial_def,
4288 .nr_pages = 0, /* This gets updated below. */
4289 .nr_pages_max = PIPE_DEF_BUFFERS,
4291 .ops = &tracing_pipe_buf_ops,
4292 .spd_release = tracing_spd_release_pipe,
4294 struct trace_array *tr = iter->tr;
4299 if (splice_grow_spd(pipe, &spd))
4302 /* copy the tracer to avoid using a global lock all around */
4303 mutex_lock(&trace_types_lock);
4304 if (unlikely(iter->trace->name != tr->current_trace->name))
4305 *iter->trace = *tr->current_trace;
4306 mutex_unlock(&trace_types_lock);
4308 mutex_lock(&iter->mutex);
4310 if (iter->trace->splice_read) {
4311 ret = iter->trace->splice_read(iter, filp,
4312 ppos, pipe, len, flags);
4317 ret = tracing_wait_pipe(filp);
4321 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4326 trace_event_read_lock();
4327 trace_access_lock(iter->cpu_file);
4329 /* Fill as many pages as possible. */
4330 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4331 spd.pages[i] = alloc_page(GFP_KERNEL);
4335 rem = tracing_fill_pipe_page(rem, iter);
4337 /* Copy the data into the page, so we can start over. */
4338 ret = trace_seq_to_buffer(&iter->seq,
4339 page_address(spd.pages[i]),
4342 __free_page(spd.pages[i]);
4345 spd.partial[i].offset = 0;
4346 spd.partial[i].len = iter->seq.len;
4348 trace_seq_init(&iter->seq);
4351 trace_access_unlock(iter->cpu_file);
4352 trace_event_read_unlock();
4353 mutex_unlock(&iter->mutex);
4357 ret = splice_to_pipe(pipe, &spd);
4359 splice_shrink_spd(&spd);
4363 mutex_unlock(&iter->mutex);
4368 tracing_entries_read(struct file *filp, char __user *ubuf,
4369 size_t cnt, loff_t *ppos)
4371 struct trace_cpu *tc = filp->private_data;
4372 struct trace_array *tr = tc->tr;
4377 mutex_lock(&trace_types_lock);
4379 if (tc->cpu == RING_BUFFER_ALL_CPUS) {
4380 int cpu, buf_size_same;
4385 /* check if all cpu sizes are the same */
4386 for_each_tracing_cpu(cpu) {
4387 /* fill in the size from the first enabled cpu */
4389 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4390 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4396 if (buf_size_same) {
4397 if (!ring_buffer_expanded)
4398 r = sprintf(buf, "%lu (expanded: %lu)\n",
4400 trace_buf_size >> 10);
4402 r = sprintf(buf, "%lu\n", size >> 10);
4404 r = sprintf(buf, "X\n");
4406 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
4408 mutex_unlock(&trace_types_lock);
4410 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4415 tracing_entries_write(struct file *filp, const char __user *ubuf,
4416 size_t cnt, loff_t *ppos)
4418 struct trace_cpu *tc = filp->private_data;
4422 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4426 /* must have at least 1 entry */
4430 /* value is in KB */
4433 ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
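/*
 * Userspace sketch: the value written is taken in KB, per the
 * "value is in KB" note above, and applies per cpu:
 *
 *   echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb   # ~2MB per cpu
 *   cat /sys/kernel/debug/tracing/buffer_size_kb
 */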
4443 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4444 size_t cnt, loff_t *ppos)
4446 struct trace_array *tr = filp->private_data;
4449 unsigned long size = 0, expanded_size = 0;
4451 mutex_lock(&trace_types_lock);
4452 for_each_tracing_cpu(cpu) {
4453 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4454 if (!ring_buffer_expanded)
4455 expanded_size += trace_buf_size >> 10;
4457 if (ring_buffer_expanded)
4458 r = sprintf(buf, "%lu\n", size);
4460 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4461 mutex_unlock(&trace_types_lock);
4463 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4467 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4468 size_t cnt, loff_t *ppos)
4471 * There is no need to read what the user has written; this function
4472 * exists just to make sure that there is no error when "echo" is used
4481 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4483 struct trace_array *tr = inode->i_private;
4485 /* disable tracing? */
4486 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4488 /* resize the ring buffer to 0 */
4489 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4491 trace_array_put(tr);
4497 tracing_mark_write(struct file *filp, const char __user *ubuf,
4498 size_t cnt, loff_t *fpos)
4500 unsigned long addr = (unsigned long)ubuf;
4501 struct trace_array *tr = filp->private_data;
4502 struct ring_buffer_event *event;
4503 struct ring_buffer *buffer;
4504 struct print_entry *entry;
4505 unsigned long irq_flags;
4506 struct page *pages[2];
4516 if (tracing_disabled)
4519 if (!(trace_flags & TRACE_ITER_MARKERS))
4522 if (cnt > TRACE_BUF_SIZE)
4523 cnt = TRACE_BUF_SIZE;
4526 * Userspace is injecting traces into the kernel trace buffer.
4527 * We want to be as non-intrusive as possible.
4528 * To do so, we do not want to allocate any special buffers
4529 * or take any locks, but instead write the userspace data
4530 * straight into the ring buffer.
4532 * First we need to pin the userspace buffer into memory,
4533 * which it most likely already is, because the caller just
4534 * referenced it. But there's no guarantee that it is. By using
4535 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can get
4536 * access to the pages directly, and then write the data straight into the ring buffer.
4539 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
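/*
 * Minimal userspace sketch (assumes debugfs at /sys/kernel/debug):
 *
 *   int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *   write(fd, "my-app: phase 2 start\n", 22);
 *
 * The marker then shows up inline with kernel events in the trace.
 */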
4541 /* check if we cross pages */
4542 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4545 offset = addr & (PAGE_SIZE - 1);
4548 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4549 if (ret < nr_pages) {
4551 put_page(pages[ret]);
4556 for (i = 0; i < nr_pages; i++)
4557 map_page[i] = kmap_atomic(pages[i]);
4559 local_save_flags(irq_flags);
4560 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4561 buffer = tr->trace_buffer.buffer;
4562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4563 irq_flags, preempt_count());
4565 /* Ring buffer disabled, return as if not open for write */
4570 entry = ring_buffer_event_data(event);
4571 entry->ip = _THIS_IP_;
4573 if (nr_pages == 2) {
4574 len = PAGE_SIZE - offset;
4575 memcpy(&entry->buf, map_page[0] + offset, len);
4576 memcpy(&entry->buf[len], map_page[1], cnt - len);
4578 memcpy(&entry->buf, map_page[0] + offset, cnt);
4580 if (entry->buf[cnt - 1] != '\n') {
4581 entry->buf[cnt] = '\n';
4582 entry->buf[cnt + 1] = '\0';
4584 entry->buf[cnt] = '\0';
4586 __buffer_unlock_commit(buffer, event);
4593 for (i = 0; i < nr_pages; i++){
4594 kunmap_atomic(map_page[i]);
4601 static int tracing_clock_show(struct seq_file *m, void *v)
4603 struct trace_array *tr = m->private;
4606 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4608 "%s%s%s%s", i ? " " : "",
4609 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4610 i == tr->clock_id ? "]" : "");
4616 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4617 size_t cnt, loff_t *fpos)
4619 struct seq_file *m = filp->private_data;
4620 struct trace_array *tr = m->private;
4622 const char *clockstr;
4625 if (cnt >= sizeof(buf))
4628 if (copy_from_user(&buf, ubuf, cnt))
4633 clockstr = strstrip(buf);
4635 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4636 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4639 if (i == ARRAY_SIZE(trace_clocks))
4642 mutex_lock(&trace_types_lock);
4646 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4649 * The new clock may not be consistent with the previous clock;
4650 * reset the buffer so that it doesn't mix incomparable timestamps.
4652 tracing_reset_online_cpus(&global_trace.trace_buffer);
4654 #ifdef CONFIG_TRACER_MAX_TRACE
4655 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4656 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4657 tracing_reset_online_cpus(&global_trace.max_buffer);
4660 mutex_unlock(&trace_types_lock);
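/*
 * Userspace sketch; the clock names come from trace_clocks[] (see the
 * mini-HOWTO above):
 *
 *   cat /sys/kernel/debug/tracing/trace_clock     # e.g. "[local] global counter"
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Note the reset above: timestamps from different clocks do not compare.
 */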
4667 static int tracing_clock_open(struct inode *inode, struct file *file)
4669 struct trace_array *tr = inode->i_private;
4672 if (tracing_disabled)
4675 if (trace_array_get(tr))
4678 ret = single_open(file, tracing_clock_show, inode->i_private);
4680 trace_array_put(tr);
4685 struct ftrace_buffer_info {
4686 struct trace_iterator iter;
4691 #ifdef CONFIG_TRACER_SNAPSHOT
4692 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4694 struct trace_cpu *tc = inode->i_private;
4695 struct trace_array *tr = tc->tr;
4696 struct trace_iterator *iter;
4700 if (trace_array_get(tr) < 0)
4703 if (file->f_mode & FMODE_READ) {
4704 iter = __tracing_open(tr, tc, inode, file, true);
4706 ret = PTR_ERR(iter);
4708 /* Writes still need the seq_file to hold the private data */
4709 m = kzalloc(sizeof(*m), GFP_KERNEL);
4712 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4718 iter->trace_buffer = &tc->tr->max_buffer;
4719 iter->cpu_file = tc->cpu;
4721 file->private_data = m;
4725 trace_array_put(tr);
4731 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4734 struct seq_file *m = filp->private_data;
4735 struct trace_iterator *iter = m->private;
4736 struct trace_array *tr = iter->tr;
4740 ret = tracing_update_buffers();
4744 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4748 mutex_lock(&trace_types_lock);
4750 if (tr->current_trace->use_max_tr) {
4757 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4761 if (tr->allocated_snapshot)
4765 /* Only allow per-cpu swap if the ring buffer supports it */
4766 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4767 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4772 if (!tr->allocated_snapshot) {
4773 ret = alloc_snapshot(tr);
4777 local_irq_disable();
4778 /* Now, we're going to swap */
4779 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4780 update_max_tr(tr, current, smp_processor_id());
4782 update_max_tr_single(tr, current, iter->cpu_file);
4786 if (tr->allocated_snapshot) {
4787 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4788 tracing_reset_online_cpus(&tr->max_buffer);
4790 tracing_reset(&tr->max_buffer, iter->cpu_file);
4800 mutex_unlock(&trace_types_lock);
4804 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4806 struct seq_file *m = file->private_data;
4809 ret = tracing_release(inode, file);
4811 if (file->f_mode & FMODE_READ)
4814 /* If write only, the seq_file is just a stub */
4822 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4823 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4824 size_t count, loff_t *ppos);
4825 static int tracing_buffers_release(struct inode *inode, struct file *file);
4826 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4827 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4829 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4831 struct ftrace_buffer_info *info;
4834 ret = tracing_buffers_open(inode, filp);
4838 info = filp->private_data;
4840 if (info->iter.trace->use_max_tr) {
4841 tracing_buffers_release(inode, filp);
4845 info->iter.snapshot = true;
4846 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4851 #endif /* CONFIG_TRACER_SNAPSHOT */
4854 static const struct file_operations tracing_max_lat_fops = {
4855 .open = tracing_open_generic,
4856 .read = tracing_max_lat_read,
4857 .write = tracing_max_lat_write,
4858 .llseek = generic_file_llseek,
4861 static const struct file_operations set_tracer_fops = {
4862 .open = tracing_open_generic,
4863 .read = tracing_set_trace_read,
4864 .write = tracing_set_trace_write,
4865 .llseek = generic_file_llseek,
4868 static const struct file_operations tracing_pipe_fops = {
4869 .open = tracing_open_pipe,
4870 .poll = tracing_poll_pipe,
4871 .read = tracing_read_pipe,
4872 .splice_read = tracing_splice_read_pipe,
4873 .release = tracing_release_pipe,
4874 .llseek = no_llseek,
4877 static const struct file_operations tracing_entries_fops = {
4878 .open = tracing_open_generic_tc,
4879 .read = tracing_entries_read,
4880 .write = tracing_entries_write,
4881 .llseek = generic_file_llseek,
4882 .release = tracing_release_generic_tc,
4885 static const struct file_operations tracing_total_entries_fops = {
4886 .open = tracing_open_generic_tr,
4887 .read = tracing_total_entries_read,
4888 .llseek = generic_file_llseek,
4889 .release = tracing_release_generic_tr,
4892 static const struct file_operations tracing_free_buffer_fops = {
4893 .open = tracing_open_generic_tr,
4894 .write = tracing_free_buffer_write,
4895 .release = tracing_free_buffer_release,
4898 static const struct file_operations tracing_mark_fops = {
4899 .open = tracing_open_generic_tr,
4900 .write = tracing_mark_write,
4901 .llseek = generic_file_llseek,
4902 .release = tracing_release_generic_tr,
4905 static const struct file_operations trace_clock_fops = {
4906 .open = tracing_clock_open,
4908 .llseek = seq_lseek,
4909 .release = tracing_single_release_tr,
4910 .write = tracing_clock_write,
4913 #ifdef CONFIG_TRACER_SNAPSHOT
4914 static const struct file_operations snapshot_fops = {
4915 .open = tracing_snapshot_open,
4917 .write = tracing_snapshot_write,
4918 .llseek = tracing_seek,
4919 .release = tracing_snapshot_release,
4922 static const struct file_operations snapshot_raw_fops = {
4923 .open = snapshot_raw_open,
4924 .read = tracing_buffers_read,
4925 .release = tracing_buffers_release,
4926 .splice_read = tracing_buffers_splice_read,
4927 .llseek = no_llseek,
4930 #endif /* CONFIG_TRACER_SNAPSHOT */
4932 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4934 struct trace_cpu *tc = inode->i_private;
4935 struct trace_array *tr = tc->tr;
4936 struct ftrace_buffer_info *info;
4939 if (tracing_disabled)
4942 if (trace_array_get(tr) < 0)
4945 info = kzalloc(sizeof(*info), GFP_KERNEL);
4947 trace_array_put(tr);
4951 mutex_lock(&trace_types_lock);
4956 info->iter.cpu_file = tc->cpu;
4957 info->iter.trace = tr->current_trace;
4958 info->iter.trace_buffer = &tr->trace_buffer;
4960 /* Force reading ring buffer for first read */
4961 info->read = (unsigned int)-1;
4963 filp->private_data = info;
4965 mutex_unlock(&trace_types_lock);
4967 ret = nonseekable_open(inode, filp);
4969 trace_array_put(tr);
4975 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4977 struct ftrace_buffer_info *info = filp->private_data;
4978 struct trace_iterator *iter = &info->iter;
4980 return trace_poll(iter, filp, poll_table);
4984 tracing_buffers_read(struct file *filp, char __user *ubuf,
4985 size_t count, loff_t *ppos)
4987 struct ftrace_buffer_info *info = filp->private_data;
4988 struct trace_iterator *iter = &info->iter;
4995 mutex_lock(&trace_types_lock);
4997 #ifdef CONFIG_TRACER_MAX_TRACE
4998 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5005 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5011 /* Do we have previous read data to read? */
5012 if (info->read < PAGE_SIZE)
5016 trace_access_lock(iter->cpu_file);
5017 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5021 trace_access_unlock(iter->cpu_file);
5024 if (trace_empty(iter)) {
5025 if ((filp->f_flags & O_NONBLOCK)) {
5029 mutex_unlock(&trace_types_lock);
5030 iter->trace->wait_pipe(iter);
5031 mutex_lock(&trace_types_lock);
5032 if (signal_pending(current)) {
5044 size = PAGE_SIZE - info->read;
5048 ret = copy_to_user(ubuf, info->spare + info->read, size);
5059 mutex_unlock(&trace_types_lock);
5064 static int tracing_buffers_release(struct inode *inode, struct file *file)
5066 struct ftrace_buffer_info *info = file->private_data;
5067 struct trace_iterator *iter = &info->iter;
5069 mutex_lock(&trace_types_lock);
5071 __trace_array_put(iter->tr);
5074 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5077 mutex_unlock(&trace_types_lock);
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
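/*
 * Illustrative splice() consumer (not part of the original file): the
 * function above moves whole ring-buffer pages into a pipe without
 * copying them through a user buffer.  A minimal sketch, assuming
 * debugfs mounted at /sys/kernel/debug and a 4096-byte PAGE_SIZE:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			      O_RDONLY);
 *		int out = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *
 *		if (in < 0 || out < 0 || pipe(fds) < 0)
 *			return 1;
 *		for (;;) {
 *			// offsets and lengths must stay page aligned
 *			ssize_t n = splice(in, NULL, fds[1], NULL, 4096, 0);
 *			if (n <= 0)
 *				break;
 *			splice(fds[0], NULL, out, NULL, n, 0);
 *		}
 *		return 0;
 *	}
 */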
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;
	int cpu = tc->cpu;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
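/*
 * A read of per_cpu/cpuN/stats therefore yields one "name: value" pair
 * per line, in the order printed above:
 *
 *	entries: <n>
 *	overrun: <n>
 *	commit overrun: <n>
 *	bytes: <n>
 *	oldest event ts: <secs>.<usecs>
 *	now ts: <secs>.<usecs>
 *	dropped events: <n>
 *	read events: <n>
 *
 * (the two timestamp lines are printed as raw counts instead when the
 * selected trace clock does not count in nanoseconds).
 */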
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
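/*
 * Usage sketch (illustrative, not part of the original file): once the
 * "snapshot" command is registered, it can be attached to functions via
 * set_ftrace_filter:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter		(every hit)
 *	# echo 'schedule:snapshot:3' > set_ftrace_filter	(first 3 hits)
 *	# echo '!schedule:snapshot' > set_ftrace_filter		(remove probe)
 *
 * The optional ":count" suffix is what ftrace_trace_snapshot_callback()
 * parses above; a missing count is stored as -1 (unlimited).
 */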
#else
static inline int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *)&data->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_cpu,
			  (void *)&data->trace_cpu, &snapshot_fops);

	trace_create_file("snapshot_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &snapshot_raw_fops);
#endif
}
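/*
 * The calls above populate one directory per CPU, e.g.
 * tracing/per_cpu/cpu0/ containing trace, trace_pipe, trace_pipe_raw,
 * stats and buffer_size_kb (plus snapshot and snapshot_raw when
 * CONFIG_TRACER_SNAPSHOT is enabled).
 */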
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};
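/*
 * Usage sketch (illustrative, not part of the original file): each core
 * option is a boolean file under tracing/options/, and only "0" or "1"
 * is accepted, as enforced by trace_options_core_write():
 *
 *	# cat /sys/kernel/debug/tracing/options/overwrite
 *	1
 *	# echo 0 > /sys/kernel/debug/tracing/options/overwrite
 */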
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
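/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on		(pause)
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_on		(resume)
 *
 * Note that rb_simple_write() also calls the current tracer's
 * start()/stop() callbacks, so a tracer can quiesce per-tracer state
 * rather than just the ring buffer itself.
 */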
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
	}
}
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	init_trace_buffers(tr, buf);

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* Holder for file callbacks */
	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	tr->trace_cpu.tr = tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret)
		goto out_free_tr;

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	event_trace_del_tracer(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to remove the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
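/*
 * Usage sketch (illustrative, not part of the original file): with the
 * directory operations hijacked, instances are managed with plain
 * mkdir/rmdir:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# ls /sys/kernel/debug/tracing/instances/foo
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * rmdir fails with -EBUSY while the instance still has users (see the
 * tr->ref check in instance_delete()).
 */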
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *)&tr->trace_cpu, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  (void *)&tr->trace_cpu, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			&global_trace, &tracing_cpumask_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
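/*
 * Usage sketch (illustrative, not part of the original file): besides
 * the panic/die notifiers registered at boot, ftrace_dump() may be
 * called directly from debugging code:
 *
 *	ftrace_dump(DUMP_ALL);		dump every CPU's buffer
 *	ftrace_dump(DUMP_ORIG);		only the CPU that hit the bug
 *
 * It is also reachable at runtime via the sysrq-z trigger.
 */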
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Holder for file callbacks */
	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	global_trace.trace_cpu.tr = &global_trace;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

 out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
 out:
	return ret;
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called at late_initcall time. If we did
	 * not find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);