2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/sched/rt.h>
46 #include "trace_output.h"
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
52 bool ring_buffer_expanded;
55 * We need to change this state when a selftest is running.
56 * A selftest will look into the ring buffer to count the
57 * entries inserted during the selftest, although concurrent
58 * insertions into the ring buffer, such as trace_printk, could occur
59 * at the same time, giving false positive or negative results.
61 static bool __read_mostly tracing_selftest_running;
64 * If a tracer is running, we do not want to run SELFTEST.
66 bool __read_mostly tracing_selftest_disabled;
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
77 static struct tracer_flags dummy_tracer_flags = {
79 .opts = dummy_tracer_opt
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
101 static int tracing_disabled = 1;
103 cpumask_var_t __read_mostly tracing_buffer_mask;
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
112 * serial console.
114 * It is off by default; you can enable it either by specifying
115 * "ftrace_dump_on_oops" on the kernel command line, or by setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
121 enum ftrace_dump_mode ftrace_dump_on_oops;
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
126 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
127 /* Map of enums to their values, for "enum_map" file */
128 struct trace_enum_map_head {
130 unsigned long length;
133 union trace_enum_map_item;
135 struct trace_enum_map_tail {
137 * "end" is first and points to NULL as it must be different
138 * than "mod" or "enum_string"
140 union trace_enum_map_item *next;
141 const char *end; /* points to NULL */
144 static DEFINE_MUTEX(trace_enum_mutex);
147 * The trace_enum_maps are saved in an array with two extra elements,
148 * one at the beginning, and one at the end. The beginning item contains
149 * the count of the saved maps (head.length), and the module they
150 * belong to if not built in (head.mod). The ending item contains a
151 * pointer to the next array of saved enum_map items.
153 union trace_enum_map_item {
154 struct trace_enum_map map;
155 struct trace_enum_map_head head;
156 struct trace_enum_map_tail tail;
159 static union trace_enum_map_item *trace_enum_maps;
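/*
 * Illustrative sketch (not part of the original file): given the layout
 * described above (head, then head.length map entries, then tail, with
 * tail.next chaining to the next saved array), all saved maps can be
 * walked like this. A real walker would hold trace_enum_mutex; the
 * struct trace_enum_map fields come from <linux/trace_events.h>.
 */
static void __maybe_unused trace_enum_maps_walk_example(void)
{
	union trace_enum_map_item *ptr = trace_enum_maps;

	while (ptr) {
		unsigned long i, len = ptr->head.length;

		/* the map entries sit between the head and tail items */
		for (i = 1; i <= len; i++)
			pr_info("%s: %s = %lu\n", ptr[i].map.system,
				ptr[i].map.enum_string,
				ptr[i].map.enum_value);

		ptr = ptr[len + 1].tail.next;
	}
}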
160 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
162 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
164 #define MAX_TRACER_SIZE 100
165 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
166 static char *default_bootup_tracer;
168 static bool allocate_snapshot;
170 static int __init set_cmdline_ftrace(char *str)
172 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
173 default_bootup_tracer = bootup_tracer_buf;
174 /* We are using ftrace early, expand it */
175 ring_buffer_expanded = true;
178 __setup("ftrace=", set_cmdline_ftrace);
180 static int __init set_ftrace_dump_on_oops(char *str)
182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
194 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
196 static int __init stop_trace_on_warning(char *str)
198 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
199 __disable_trace_on_warning = 1;
202 __setup("traceoff_on_warning", stop_trace_on_warning);
204 static int __init boot_alloc_snapshot(char *str)
206 allocate_snapshot = true;
207 /* We also need the main ring buffer expanded */
208 ring_buffer_expanded = true;
211 __setup("alloc_snapshot", boot_alloc_snapshot);
214 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
216 static int __init set_trace_boot_options(char *str)
218 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
221 __setup("trace_options=", set_trace_boot_options);
223 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
224 static char *trace_boot_clock __initdata;
226 static int __init set_trace_boot_clock(char *str)
228 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
229 trace_boot_clock = trace_boot_clock_buf;
232 __setup("trace_clock=", set_trace_boot_clock);
234 static int __init set_tracepoint_printk(char *str)
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
240 __setup("tp_printk", set_tracepoint_printk);
242 unsigned long long ns2usecs(cycle_t nsec)
243 {
244 nsec += 500;
245 do_div(nsec, 1000);
246 return nsec;
247 }
249 /* trace_flags holds trace_options default values */
250 #define TRACE_DEFAULT_FLAGS \
251 (FUNCTION_DEFAULT_FLAGS | \
252 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
253 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
254 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
255 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
257 /* trace_options that are only supported by global_trace */
258 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
259 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
263 * The global_trace is the descriptor that holds the tracing
264 * buffers for the live tracing. For each CPU, it contains
265 * a linked list of pages that will store trace entries. The
266 * page descriptor of the pages in memory is used to hold
267 * the linked list by linking the lru item in the page descriptor
268 * to each of the pages in the buffer per CPU.
270 * For each active CPU there is a data field that holds the
271 * pages for the buffer for that CPU. Each CPU has the same number
272 * of pages allocated for its buffer.
274 static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
278 LIST_HEAD(ftrace_trace_arrays);
280 int trace_array_get(struct trace_array *this_tr)
282 struct trace_array *tr;
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
293 mutex_unlock(&trace_types_lock);
298 static void __trace_array_put(struct trace_array *this_tr)
300 WARN_ON(!this_tr->ref);
304 void trace_array_put(struct trace_array *this_tr)
306 mutex_lock(&trace_types_lock);
307 __trace_array_put(this_tr);
308 mutex_unlock(&trace_types_lock);
311 int filter_check_discard(struct trace_event_file *file, void *rec,
312 struct ring_buffer *buffer,
313 struct ring_buffer_event *event)
315 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
316 !filter_match_preds(file->filter, rec)) {
317 ring_buffer_discard_commit(buffer, event);
323 EXPORT_SYMBOL_GPL(filter_check_discard);
325 int call_filter_check_discard(struct trace_event_call *call, void *rec,
326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 ring_buffer_discard_commit(buffer, event);
337 EXPORT_SYMBOL_GPL(call_filter_check_discard);
339 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
343 /* Early boot up does not have a buffer yet */
345 return trace_clock_local();
347 ts = ring_buffer_time_stamp(buf->buffer, cpu);
348 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
353 cycle_t ftrace_now(int cpu)
355 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
359 * tracing_is_enabled - Show if global_trace has been enabled
361 * Shows if the global trace has been enabled or not. It uses the
362 * mirror flag "buffer_disabled" to be used in fast paths such as for
363 * the irqsoff tracer. But it may be inaccurate due to races. If you
364 * need to know the accurate state, use tracing_is_on() which is a little
365 * slower, but accurate.
367 int tracing_is_enabled(void)
370 * For quick access (irqsoff uses this in fast path), just
371 * return the mirror variable of the state of the ring buffer.
372 * It's a little racy, but we don't really care.
375 return !global_trace.buffer_disabled;
379 * trace_buf_size is the size in bytes that is allocated
380 * for a buffer. Note, the number of bytes is always rounded
381 * to page size.
383 * This number is purposely set to a low number of 16384.
384 * If the dump on oops happens, it is much appreciated not to
385 * have to wait for all that output. Anyway, this can be
386 * boot time and run time configurable.
388 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
390 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
392 /* trace_types holds a link list of available tracers. */
393 static struct tracer *trace_types __read_mostly;
396 * trace_types_lock is used to protect the trace_types list.
398 DEFINE_MUTEX(trace_types_lock);
401 * serialize the access of the ring buffer
403 * The ring buffer serializes readers, but that is only low-level protection.
404 * The validity of the events (which are returned by ring_buffer_peek() etc.)
405 * is not protected by the ring buffer.
407 * The content of events may become garbage if we allow another process to
408 * consume these events concurrently:
409 * A) the page of the consumed events may become a normal page
410 * (not a reader page) in the ring buffer, and this page will be rewritten
411 * by the events producer.
412 * B) the page of the consumed events may become a page for splice_read,
413 * and this page will be returned to the system.
415 * These primitives allow multi-process access to different cpu ring buffers.
418 * These primitives don't distinguish read-only and read-consume access.
419 * Multiple read-only accesses are also serialized.
423 static DECLARE_RWSEM(all_cpu_access_lock);
424 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
426 static inline void trace_access_lock(int cpu)
428 if (cpu == RING_BUFFER_ALL_CPUS) {
429 /* gain it for accessing the whole ring buffer. */
430 down_write(&all_cpu_access_lock);
432 /* gain it for accessing a cpu ring buffer. */
434 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
435 down_read(&all_cpu_access_lock);
437 /* Secondly block other access to this @cpu ring buffer. */
438 mutex_lock(&per_cpu(cpu_access_lock, cpu));
442 static inline void trace_access_unlock(int cpu)
444 if (cpu == RING_BUFFER_ALL_CPUS) {
445 up_write(&all_cpu_access_lock);
447 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
448 up_read(&all_cpu_access_lock);
452 static inline void trace_access_lock_init(void)
456 for_each_possible_cpu(cpu)
457 mutex_init(&per_cpu(cpu_access_lock, cpu));
462 static DEFINE_MUTEX(access_lock);
464 static inline void trace_access_lock(int cpu)
467 mutex_lock(&access_lock);
470 static inline void trace_access_unlock(int cpu)
473 mutex_unlock(&access_lock);
476 static inline void trace_access_lock_init(void)
477 {
478 }
480 #endif
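/*
 * Illustrative usage (not part of the original file): with the scheme
 * above, a per-cpu consumer is just a lock/consume/unlock sequence,
 * on both the SMP and the UP implementation.
 */
static void __maybe_unused trace_access_example(struct trace_iterator *iter,
						int cpu)
{
	trace_access_lock(cpu);
	ring_buffer_consume(iter->trace_buffer->buffer, cpu,
			    &iter->ts, &iter->lost_events);
	trace_access_unlock(cpu);
}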
482 #ifdef CONFIG_STACKTRACE
483 static void __ftrace_trace_stack(struct ring_buffer *buffer,
485 int skip, int pc, struct pt_regs *regs);
486 static inline void ftrace_trace_stack(struct trace_array *tr,
487 struct ring_buffer *buffer,
489 int skip, int pc, struct pt_regs *regs);
492 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
494 int skip, int pc, struct pt_regs *regs)
497 static inline void ftrace_trace_stack(struct trace_array *tr,
498 struct ring_buffer *buffer,
500 int skip, int pc, struct pt_regs *regs)
506 static void tracer_tracing_on(struct trace_array *tr)
508 if (tr->trace_buffer.buffer)
509 ring_buffer_record_on(tr->trace_buffer.buffer);
511 * This flag is looked at when buffers haven't been allocated
512 * yet, or by some tracers (like irqsoff), that just want to
513 * know if the ring buffer has been disabled, but it can handle
514 * races where it gets disabled but we still do a record.
515 * As the check is in the fast path of the tracers, it is more
516 * important to be fast than accurate.
518 tr->buffer_disabled = 0;
519 /* Make the flag seen by readers */
524 * tracing_on - enable tracing buffers
526 * This function enables tracing buffers that may have been
527 * disabled with tracing_off.
529 void tracing_on(void)
531 tracer_tracing_on(&global_trace);
533 EXPORT_SYMBOL_GPL(tracing_on);
536 * __trace_puts - write a constant string into the trace buffer.
537 * @ip: The address of the caller
538 * @str: The constant string to write
539 * @size: The size of the string.
541 int __trace_puts(unsigned long ip, const char *str, int size)
543 struct ring_buffer_event *event;
544 struct ring_buffer *buffer;
545 struct print_entry *entry;
546 unsigned long irq_flags;
550 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
553 pc = preempt_count();
555 if (unlikely(tracing_selftest_running || tracing_disabled))
558 alloc = sizeof(*entry) + size + 2; /* possible \n added */
560 local_save_flags(irq_flags);
561 buffer = global_trace.trace_buffer.buffer;
562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
567 entry = ring_buffer_event_data(event);
570 memcpy(&entry->buf, str, size);
572 /* Add a newline if necessary */
573 if (entry->buf[size - 1] != '\n') {
574 entry->buf[size] = '\n';
575 entry->buf[size + 1] = '\0';
577 entry->buf[size] = '\0';
579 __buffer_unlock_commit(buffer, event);
580 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
584 EXPORT_SYMBOL_GPL(__trace_puts);
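/*
 * Note (illustrative): callers normally reach __trace_puts() through
 * the trace_puts() macro in <linux/kernel.h>, which computes the size
 * at the call site and picks __trace_bputs() instead for built-in
 * constant strings:
 *
 *	trace_puts("reached the scheduler\n");
 */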
587 * __trace_bputs - write the pointer to a constant string into trace buffer
588 * @ip: The address of the caller
589 * @str: The constant string to write to the buffer to
591 int __trace_bputs(unsigned long ip, const char *str)
593 struct ring_buffer_event *event;
594 struct ring_buffer *buffer;
595 struct bputs_entry *entry;
596 unsigned long irq_flags;
597 int size = sizeof(struct bputs_entry);
600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
603 pc = preempt_count();
605 if (unlikely(tracing_selftest_running || tracing_disabled))
608 local_save_flags(irq_flags);
609 buffer = global_trace.trace_buffer.buffer;
610 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
615 entry = ring_buffer_event_data(event);
619 __buffer_unlock_commit(buffer, event);
620 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
624 EXPORT_SYMBOL_GPL(__trace_bputs);
626 #ifdef CONFIG_TRACER_SNAPSHOT
628 * trace_snapshot - take a snapshot of the current buffer.
630 * This causes a swap between the snapshot buffer and the current live
631 * tracing buffer. You can use this to take snapshots of the live
632 * trace when some condition is triggered, but continue to trace.
634 * Note, make sure to allocate the snapshot either with
635 * tracing_snapshot_alloc(), or manually with:
636 * echo 1 > /sys/kernel/debug/tracing/snapshot
638 * If the snapshot buffer is not allocated, this will stop tracing,
639 * basically making a permanent snapshot.
641 void tracing_snapshot(void)
643 struct trace_array *tr = &global_trace;
644 struct tracer *tracer = tr->current_trace;
648 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
649 internal_trace_puts("*** snapshot is being ignored ***\n");
653 if (!tr->allocated_snapshot) {
654 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
655 internal_trace_puts("*** stopping trace here! ***\n");
660 /* Note, snapshot cannot be used when the tracer uses it */
661 if (tracer->use_max_tr) {
662 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
663 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
667 local_irq_save(flags);
668 update_max_tr(tr, current, smp_processor_id());
669 local_irq_restore(flags);
671 EXPORT_SYMBOL_GPL(tracing_snapshot);
673 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
674 struct trace_buffer *size_buf, int cpu_id);
675 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
677 static int alloc_snapshot(struct trace_array *tr)
681 if (!tr->allocated_snapshot) {
683 /* allocate spare buffer */
684 ret = resize_buffer_duplicate_size(&tr->max_buffer,
685 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
689 tr->allocated_snapshot = true;
695 static void free_snapshot(struct trace_array *tr)
698 * We don't free the ring buffer; instead, we resize it, because
699 * the max_tr ring buffer has some state (e.g. ring->clock) and
700 * we want to preserve it.
702 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
703 set_buffer_entries(&tr->max_buffer, 1);
704 tracing_reset_online_cpus(&tr->max_buffer);
705 tr->allocated_snapshot = false;
709 * tracing_alloc_snapshot - allocate snapshot buffer.
711 * This only allocates the snapshot buffer if it isn't already
712 * allocated - it doesn't also take a snapshot.
714 * This is meant to be used in cases where the snapshot buffer needs
715 * to be set up for events that can't sleep but need to be able to
716 * trigger a snapshot.
718 int tracing_alloc_snapshot(void)
720 struct trace_array *tr = &global_trace;
723 ret = alloc_snapshot(tr);
728 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
731 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
733 * This is similar to trace_snapshot(), but it will allocate the
734 * snapshot buffer if it isn't already allocated. Use this only
735 * where it is safe to sleep, as the allocation may sleep.
737 * This causes a swap between the snapshot buffer and the current live
738 * tracing buffer. You can use this to take snapshots of the live
739 * trace when some condition is triggered, but continue to trace.
741 void tracing_snapshot_alloc(void)
745 ret = tracing_alloc_snapshot();
751 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
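/*
 * Illustrative usage (not part of the original file; function names are
 * hypothetical): allocate the snapshot buffer where sleeping is fine,
 * then snapshot from contexts that must not sleep.
 */
static int __maybe_unused example_snapshot_init(void)
{
	return tracing_alloc_snapshot();	/* may sleep */
}

static void __maybe_unused example_on_anomaly(void)
{
	tracing_snapshot();	/* safe from atomic context */
}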
753 void tracing_snapshot(void)
755 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
757 EXPORT_SYMBOL_GPL(tracing_snapshot);
758 int tracing_alloc_snapshot(void)
760 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
763 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
764 void tracing_snapshot_alloc(void)
769 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
770 #endif /* CONFIG_TRACER_SNAPSHOT */
772 static void tracer_tracing_off(struct trace_array *tr)
774 if (tr->trace_buffer.buffer)
775 ring_buffer_record_off(tr->trace_buffer.buffer);
777 * This flag is looked at when buffers haven't been allocated
778 * yet, or by some tracers (like irqsoff), that just want to
779 * know if the ring buffer has been disabled, but it can handle
780 * races where it gets disabled but we still do a record.
781 * As the check is in the fast path of the tracers, it is more
782 * important to be fast than accurate.
784 tr->buffer_disabled = 1;
785 /* Make the flag seen by readers */
790 * tracing_off - turn off tracing buffers
792 * This function stops the tracing buffers from recording data.
793 * It does not disable any overhead the tracers themselves may
794 * be causing. This function simply causes all recording to
795 * the ring buffers to fail.
797 void tracing_off(void)
799 tracer_tracing_off(&global_trace);
801 EXPORT_SYMBOL_GPL(tracing_off);
803 void disable_trace_on_warning(void)
805 if (__disable_trace_on_warning)
810 * tracer_tracing_is_on - show real state of ring buffer enabled
811 * @tr: the trace array to know if ring buffer is enabled
813 * Shows the real state of the ring buffer, whether it is enabled or not.
815 static int tracer_tracing_is_on(struct trace_array *tr)
817 if (tr->trace_buffer.buffer)
818 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
819 return !tr->buffer_disabled;
823 * tracing_is_on - show state of ring buffers enabled
825 int tracing_is_on(void)
827 return tracer_tracing_is_on(&global_trace);
829 EXPORT_SYMBOL_GPL(tracing_is_on);
831 static int __init set_buf_size(char *str)
833 unsigned long buf_size;
837 buf_size = memparse(str, &str);
838 /* nr_entries cannot be zero */
841 trace_buf_size = buf_size;
844 __setup("trace_buf_size=", set_buf_size);
846 static int __init set_tracing_thresh(char *str)
848 unsigned long threshold;
853 ret = kstrtoul(str, 0, &threshold);
856 tracing_thresh = threshold * 1000;
859 __setup("tracing_thresh=", set_tracing_thresh);
861 unsigned long nsecs_to_usecs(unsigned long nsecs)
867 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
868 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
869 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
870 * of strings in the order that the enums were defined.
872 #undef C
873 #define C(a, b) b
875 /* These must match the bit positions in trace_iterator_flags */
876 static const char *trace_options[] = {
877 TRACE_FLAGS
878 NULL
879 };
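/*
 * Sketch (the matching definitions live in trace.h): the same
 * TRACE_FLAGS tuple also generates the bit numbers and mask values
 * there, by redefining C():
 *
 *	#define C(a, b) TRACE_ITER_##a##_BIT	// bit positions
 *	#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
 */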
884 int in_ns; /* is this clock in nanoseconds? */
886 { trace_clock_local, "local", 1 },
887 { trace_clock_global, "global", 1 },
888 { trace_clock_counter, "counter", 0 },
889 { trace_clock_jiffies, "uptime", 0 },
890 { trace_clock, "perf", 1 },
891 { ktime_get_mono_fast_ns, "mono", 1 },
892 { ktime_get_raw_fast_ns, "mono_raw", 1 },
897 * trace_parser_get_init - gets the buffer for trace parser
899 int trace_parser_get_init(struct trace_parser *parser, int size)
901 memset(parser, 0, sizeof(*parser));
903 parser->buffer = kmalloc(size, GFP_KERNEL);
912 * trace_parser_put - frees the buffer for trace parser
914 void trace_parser_put(struct trace_parser *parser)
916 kfree(parser->buffer);
920 * trace_get_user - reads the user input string separated by space
921 * (matched by isspace(ch))
923 * For each string found, the 'struct trace_parser' is updated,
924 * and the function returns.
926 * Returns number of bytes read.
928 * See kernel/trace/trace.h for 'struct trace_parser' details.
930 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
931 size_t cnt, loff_t *ppos)
938 trace_parser_clear(parser);
940 ret = get_user(ch, ubuf++);
948 * The parser is not finished with the last write,
949 * continue reading the user input without skipping spaces.
952 /* skip white space */
953 while (cnt && isspace(ch)) {
954 ret = get_user(ch, ubuf++);
961 /* only spaces were written */
971 /* read the non-space input */
972 while (cnt && !isspace(ch)) {
973 if (parser->idx < parser->size - 1)
974 parser->buffer[parser->idx++] = ch;
979 ret = get_user(ch, ubuf++);
986 /* We either got finished input or we have to wait for another call. */
987 if (isspace(ch)) {
988 parser->buffer[parser->idx] = 0;
989 parser->cont = false;
990 } else if (parser->idx < parser->size - 1) {
991 parser->cont = true;
992 parser->buffer[parser->idx++] = ch;
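/*
 * Illustrative usage (not part of the original file): a write() handler
 * that pulls one space-separated token out of a user buffer. The "64"
 * buffer size and the pr_info() are arbitrary.
 */
static ssize_t __maybe_unused example_parser_write(struct file *filp,
						   const char __user *ubuf,
						   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
		pr_info("token: %s\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return read;
}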
1005 /* TODO add a seq_buf_to_buffer() */
1006 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1010 if (trace_seq_used(s) <= s->seq.readpos)
1013 len = trace_seq_used(s) - s->seq.readpos;
1016 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1018 s->seq.readpos += cnt;
1022 unsigned long __read_mostly tracing_thresh;
1024 #ifdef CONFIG_TRACER_MAX_TRACE
1026 * Copy the new maximum trace into the separate maximum-trace
1027 * structure. (this way the maximum trace is permanently saved,
1028 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1031 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1033 struct trace_buffer *trace_buf = &tr->trace_buffer;
1034 struct trace_buffer *max_buf = &tr->max_buffer;
1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1039 max_buf->time_start = data->preempt_timestamp;
1041 max_data->saved_latency = tr->max_latency;
1042 max_data->critical_start = data->critical_start;
1043 max_data->critical_end = data->critical_end;
1045 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1046 max_data->pid = tsk->pid;
1048 * If tsk == current, then use current_uid(), as that does not use
1049 * RCU. The irq tracer can be called out of RCU scope.
1052 max_data->uid = current_uid();
1054 max_data->uid = task_uid(tsk);
1056 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1057 max_data->policy = tsk->policy;
1058 max_data->rt_priority = tsk->rt_priority;
1060 /* record this task's comm */
1061 tracing_record_cmdline(tsk);
1065 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1067 * @tsk: the task with the latency
1068 * @cpu: The cpu that initiated the trace.
1070 * Flip the buffers between the @tr and the max_tr and record information
1071 * about which task was the cause of this latency.
1074 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1076 struct ring_buffer *buf;
1081 WARN_ON_ONCE(!irqs_disabled());
1083 if (!tr->allocated_snapshot) {
1084 /* Only the nop tracer should hit this when disabling */
1085 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1089 arch_spin_lock(&tr->max_lock);
1091 /* Inherit the recordable setting from trace_buffer */
1092 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1093 ring_buffer_record_on(tr->max_buffer.buffer);
1095 ring_buffer_record_off(tr->max_buffer.buffer);
1097 buf = tr->trace_buffer.buffer;
1098 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1099 tr->max_buffer.buffer = buf;
1101 __update_max_tr(tr, tsk, cpu);
1102 arch_spin_unlock(&tr->max_lock);
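/*
 * Illustrative only (pattern, not code from this file): a latency
 * tracer such as wakeup typically calls update_max_tr() when it
 * measures a new maximum, roughly:
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id());
 *	}
 */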
1106 * update_max_tr_single - only copy one trace over, and reset the rest
1108 * @tsk: task with the latency
1109 * @cpu: the cpu of the buffer to copy.
1111 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1114 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1121 WARN_ON_ONCE(!irqs_disabled());
1122 if (!tr->allocated_snapshot) {
1123 /* Only the nop tracer should hit this when disabling */
1124 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1128 arch_spin_lock(&tr->max_lock);
1130 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1132 if (ret == -EBUSY) {
1134 * We failed to swap the buffer due to a commit taking
1135 * place on this CPU. We fail to record, but we reset
1136 * the max trace buffer (no one writes directly to it)
1137 * and flag that it failed.
1139 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1140 "Failed to swap buffers due to commit in progress\n");
1143 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1145 __update_max_tr(tr, tsk, cpu);
1146 arch_spin_unlock(&tr->max_lock);
1148 #endif /* CONFIG_TRACER_MAX_TRACE */
1150 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1152 /* Iterators are static, they should be filled or empty */
1153 if (trace_buffer_iter(iter, iter->cpu_file))
1156 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1160 #ifdef CONFIG_FTRACE_STARTUP_TEST
1161 static int run_tracer_selftest(struct tracer *type)
1163 struct trace_array *tr = &global_trace;
1164 struct tracer *saved_tracer = tr->current_trace;
1167 if (!type->selftest || tracing_selftest_disabled)
1171 * Run a selftest on this tracer.
1172 * Here we reset the trace buffer, and set the current
1173 * tracer to be this tracer. The tracer can then run some
1174 * internal tracing to verify that everything is in order.
1175 * If we fail, we do not register this tracer.
1177 tracing_reset_online_cpus(&tr->trace_buffer);
1179 tr->current_trace = type;
1181 #ifdef CONFIG_TRACER_MAX_TRACE
1182 if (type->use_max_tr) {
1183 /* If we expanded the buffers, make sure the max is expanded too */
1184 if (ring_buffer_expanded)
1185 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1186 RING_BUFFER_ALL_CPUS);
1187 tr->allocated_snapshot = true;
1191 /* the test is responsible for initializing and enabling */
1192 pr_info("Testing tracer %s: ", type->name);
1193 ret = type->selftest(type, tr);
1194 /* the test is responsible for resetting too */
1195 tr->current_trace = saved_tracer;
1197 printk(KERN_CONT "FAILED!\n");
1198 /* Add the warning after printing 'FAILED' */
1202 /* Only reset on passing, to avoid touching corrupted buffers */
1203 tracing_reset_online_cpus(&tr->trace_buffer);
1205 #ifdef CONFIG_TRACER_MAX_TRACE
1206 if (type->use_max_tr) {
1207 tr->allocated_snapshot = false;
1209 /* Shrink the max buffer again */
1210 if (ring_buffer_expanded)
1211 ring_buffer_resize(tr->max_buffer.buffer, 1,
1212 RING_BUFFER_ALL_CPUS);
1216 printk(KERN_CONT "PASSED\n");
1220 static inline int run_tracer_selftest(struct tracer *type)
1224 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1226 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1228 static void __init apply_trace_boot_options(void);
1231 * register_tracer - register a tracer with the ftrace system.
1232 * @type: the plugin for the tracer
1234 * Register a new plugin tracer.
1236 int __init register_tracer(struct tracer *type)
1242 pr_info("Tracer must have a name\n");
1246 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1247 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1251 mutex_lock(&trace_types_lock);
1253 tracing_selftest_running = true;
1255 for (t = trace_types; t; t = t->next) {
1256 if (strcmp(type->name, t->name) == 0) {
1258 pr_info("Tracer %s already registered\n",
1265 if (!type->set_flag)
1266 type->set_flag = &dummy_set_flag;
1268 type->flags = &dummy_tracer_flags;
1270 if (!type->flags->opts)
1271 type->flags->opts = dummy_tracer_opt;
1273 ret = run_tracer_selftest(type);
1277 type->next = trace_types;
1279 add_tracer_options(&global_trace, type);
1282 tracing_selftest_running = false;
1283 mutex_unlock(&trace_types_lock);
1285 if (ret || !default_bootup_tracer)
1288 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1291 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1292 /* Do we want this tracer to start on bootup? */
1293 tracing_set_tracer(&global_trace, type->name);
1294 default_bootup_tracer = NULL;
1296 apply_trace_boot_options();
1298 /* disable other selftests, since this will break them. */
1299 tracing_selftest_disabled = true;
1300 #ifdef CONFIG_FTRACE_STARTUP_TEST
1301 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1309 void tracing_reset(struct trace_buffer *buf, int cpu)
1311 struct ring_buffer *buffer = buf->buffer;
1316 ring_buffer_record_disable(buffer);
1318 /* Make sure all commits have finished */
1319 synchronize_sched();
1320 ring_buffer_reset_cpu(buffer, cpu);
1322 ring_buffer_record_enable(buffer);
1325 void tracing_reset_online_cpus(struct trace_buffer *buf)
1327 struct ring_buffer *buffer = buf->buffer;
1333 ring_buffer_record_disable(buffer);
1335 /* Make sure all commits have finished */
1336 synchronize_sched();
1338 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1340 for_each_online_cpu(cpu)
1341 ring_buffer_reset_cpu(buffer, cpu);
1343 ring_buffer_record_enable(buffer);
1346 /* Must have trace_types_lock held */
1347 void tracing_reset_all_online_cpus(void)
1349 struct trace_array *tr;
1351 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1352 tracing_reset_online_cpus(&tr->trace_buffer);
1353 #ifdef CONFIG_TRACER_MAX_TRACE
1354 tracing_reset_online_cpus(&tr->max_buffer);
1359 #define SAVED_CMDLINES_DEFAULT 128
1360 #define NO_CMDLINE_MAP UINT_MAX
1361 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1362 struct saved_cmdlines_buffer {
1363 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1364 unsigned *map_cmdline_to_pid;
1365 unsigned cmdline_num;
1367 char *saved_cmdlines;
1369 static struct saved_cmdlines_buffer *savedcmd;
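/*
 * Layout sketch (illustrative): the two maps invert each other and the
 * comm strings live in one flat buffer:
 *
 *	map_pid_to_cmdline[pid]		== idx
 *	map_cmdline_to_pid[idx]		== pid
 *	saved_cmdlines + idx * TASK_COMM_LEN	-> "comm\0"
 *
 * NO_CMDLINE_MAP in either direction means "no entry".
 */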
1371 /* temporarily disable recording */
1372 static atomic_t trace_record_cmdline_disabled __read_mostly;
1374 static inline char *get_saved_cmdlines(int idx)
1376 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1379 static inline void set_cmdline(int idx, const char *cmdline)
1381 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1384 static int allocate_cmdlines_buffer(unsigned int val,
1385 struct saved_cmdlines_buffer *s)
1387 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1389 if (!s->map_cmdline_to_pid)
1392 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1393 if (!s->saved_cmdlines) {
1394 kfree(s->map_cmdline_to_pid);
1399 s->cmdline_num = val;
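/*
 * memset() fills byte-by-byte; initializing the maps to NO_CMDLINE_MAP
 * works only because UINT_MAX is 0xff in every byte.
 */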
1400 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1401 sizeof(s->map_pid_to_cmdline));
1402 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1403 val * sizeof(*s->map_cmdline_to_pid));
1408 static int trace_create_savedcmd(void)
1412 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1416 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1426 int is_tracing_stopped(void)
1428 return global_trace.stop_count;
1432 * tracing_start - quick start of the tracer
1434 * If tracing is enabled but was stopped by tracing_stop,
1435 * this will start the tracer back up.
1437 void tracing_start(void)
1439 struct ring_buffer *buffer;
1440 unsigned long flags;
1442 if (tracing_disabled)
1445 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1446 if (--global_trace.stop_count) {
1447 if (global_trace.stop_count < 0) {
1448 /* Someone screwed up their debugging */
1450 global_trace.stop_count = 0;
1455 /* Prevent the buffers from switching */
1456 arch_spin_lock(&global_trace.max_lock);
1458 buffer = global_trace.trace_buffer.buffer;
1460 ring_buffer_record_enable(buffer);
1462 #ifdef CONFIG_TRACER_MAX_TRACE
1463 buffer = global_trace.max_buffer.buffer;
1465 ring_buffer_record_enable(buffer);
1468 arch_spin_unlock(&global_trace.max_lock);
1471 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1474 static void tracing_start_tr(struct trace_array *tr)
1476 struct ring_buffer *buffer;
1477 unsigned long flags;
1479 if (tracing_disabled)
1482 /* If global, we need to also start the max tracer */
1483 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1484 return tracing_start();
1486 raw_spin_lock_irqsave(&tr->start_lock, flags);
1488 if (--tr->stop_count) {
1489 if (tr->stop_count < 0) {
1490 /* Someone screwed up their debugging */
1497 buffer = tr->trace_buffer.buffer;
1499 ring_buffer_record_enable(buffer);
1502 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1506 * tracing_stop - quick stop of the tracer
1508 * Light weight way to stop tracing. Use in conjunction with
1511 void tracing_stop(void)
1513 struct ring_buffer *buffer;
1514 unsigned long flags;
1516 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1517 if (global_trace.stop_count++)
1520 /* Prevent the buffers from switching */
1521 arch_spin_lock(&global_trace.max_lock);
1523 buffer = global_trace.trace_buffer.buffer;
1525 ring_buffer_record_disable(buffer);
1527 #ifdef CONFIG_TRACER_MAX_TRACE
1528 buffer = global_trace.max_buffer.buffer;
1530 ring_buffer_record_disable(buffer);
1533 arch_spin_unlock(&global_trace.max_lock);
1536 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1539 static void tracing_stop_tr(struct trace_array *tr)
1541 struct ring_buffer *buffer;
1542 unsigned long flags;
1544 /* If global, we need to also stop the max tracer */
1545 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1546 return tracing_stop();
1548 raw_spin_lock_irqsave(&tr->start_lock, flags);
1549 if (tr->stop_count++)
1552 buffer = tr->trace_buffer.buffer;
1554 ring_buffer_record_disable(buffer);
1557 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1560 void trace_stop_cmdline_recording(void);
1562 static int trace_save_cmdline(struct task_struct *tsk)
1566 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1570 * It's not the end of the world if we don't get
1571 * the lock, but we also don't want to spin
1572 * nor do we want to disable interrupts,
1573 * so if we miss here, then better luck next time.
1575 if (!arch_spin_trylock(&trace_cmdline_lock))
1578 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1579 if (idx == NO_CMDLINE_MAP) {
1580 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1583 * Check whether the cmdline buffer at idx has a pid
1584 * mapped. We are going to overwrite that entry so we
1585 * need to clear the map_pid_to_cmdline. Otherwise we
1586 * would read the new comm for the old pid.
1588 pid = savedcmd->map_cmdline_to_pid[idx];
1589 if (pid != NO_CMDLINE_MAP)
1590 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1592 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1593 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1595 savedcmd->cmdline_idx = idx;
1598 set_cmdline(idx, tsk->comm);
1600 arch_spin_unlock(&trace_cmdline_lock);
1605 static void __trace_find_cmdline(int pid, char comm[])
1610 strcpy(comm, "<idle>");
1614 if (WARN_ON_ONCE(pid < 0)) {
1615 strcpy(comm, "<XXX>");
1619 if (pid > PID_MAX_DEFAULT) {
1620 strcpy(comm, "<...>");
1624 map = savedcmd->map_pid_to_cmdline[pid];
1625 if (map != NO_CMDLINE_MAP)
1626 strcpy(comm, get_saved_cmdlines(map));
1628 strcpy(comm, "<...>");
1631 void trace_find_cmdline(int pid, char comm[])
1634 arch_spin_lock(&trace_cmdline_lock);
1636 __trace_find_cmdline(pid, comm);
1638 arch_spin_unlock(&trace_cmdline_lock);
1642 void tracing_record_cmdline(struct task_struct *tsk)
1644 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1647 if (!__this_cpu_read(trace_cmdline_save))
1650 if (trace_save_cmdline(tsk))
1651 __this_cpu_write(trace_cmdline_save, false);
1655 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1658 struct task_struct *tsk = current;
1660 entry->preempt_count = pc & 0xff;
1661 entry->pid = (tsk) ? tsk->pid : 0;
1662 entry->flags =
1663 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1664 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1665 #else
1666 TRACE_FLAG_IRQS_NOSUPPORT |
1667 #endif
1668 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1669 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
1670 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1671 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1673 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1675 struct ring_buffer_event *
1676 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1679 unsigned long flags, int pc)
1681 struct ring_buffer_event *event;
1683 event = ring_buffer_lock_reserve(buffer, len);
1684 if (event != NULL) {
1685 struct trace_entry *ent = ring_buffer_event_data(event);
1687 tracing_generic_entry_update(ent, flags, pc);
1695 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1697 __this_cpu_write(trace_cmdline_save, true);
1698 ring_buffer_unlock_commit(buffer, event);
1701 void trace_buffer_unlock_commit(struct trace_array *tr,
1702 struct ring_buffer *buffer,
1703 struct ring_buffer_event *event,
1704 unsigned long flags, int pc)
1706 __buffer_unlock_commit(buffer, event);
1708 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1709 ftrace_trace_userstack(buffer, flags, pc);
1711 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1713 static struct ring_buffer *temp_buffer;
1715 struct ring_buffer_event *
1716 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1717 struct trace_event_file *trace_file,
1718 int type, unsigned long len,
1719 unsigned long flags, int pc)
1721 struct ring_buffer_event *entry;
1723 *current_rb = trace_file->tr->trace_buffer.buffer;
1724 entry = trace_buffer_lock_reserve(*current_rb,
1725 type, len, flags, pc);
1727 * If tracing is off, but we have triggers enabled,
1728 * we still need to look at the event data. Use the temp_buffer
1729 * to store the trace event for the trigger to use. It's recursion
1730 * safe and will not be recorded anywhere.
1732 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1733 *current_rb = temp_buffer;
1734 entry = trace_buffer_lock_reserve(*current_rb,
1735 type, len, flags, pc);
1739 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1741 struct ring_buffer_event *
1742 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743 int type, unsigned long len,
1744 unsigned long flags, int pc)
1746 *current_rb = global_trace.trace_buffer.buffer;
1747 return trace_buffer_lock_reserve(*current_rb,
1748 type, len, flags, pc);
1750 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1752 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1753 struct ring_buffer *buffer,
1754 struct ring_buffer_event *event,
1755 unsigned long flags, int pc,
1756 struct pt_regs *regs)
1758 __buffer_unlock_commit(buffer, event);
1760 ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1761 ftrace_trace_userstack(buffer, flags, pc);
1763 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1765 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1766 struct ring_buffer_event *event)
1768 ring_buffer_discard_commit(buffer, event);
1770 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1773 trace_function(struct trace_array *tr,
1774 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1777 struct trace_event_call *call = &event_function;
1778 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1779 struct ring_buffer_event *event;
1780 struct ftrace_entry *entry;
1782 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1786 entry = ring_buffer_event_data(event);
1788 entry->parent_ip = parent_ip;
1790 if (!call_filter_check_discard(call, entry, buffer, event))
1791 __buffer_unlock_commit(buffer, event);
1794 #ifdef CONFIG_STACKTRACE
1796 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797 struct ftrace_stack {
1798 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1801 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1804 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1805 unsigned long flags,
1806 int skip, int pc, struct pt_regs *regs)
1808 struct trace_event_call *call = &event_kernel_stack;
1809 struct ring_buffer_event *event;
1810 struct stack_entry *entry;
1811 struct stack_trace trace;
1813 int size = FTRACE_STACK_ENTRIES;
1815 trace.nr_entries = 0;
1819 * Since events can happen in NMIs, there's no safe way to
1820 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821 * or NMI comes in, it will just have to use the default
1822 * FTRACE_STACK_SIZE.
1824 preempt_disable_notrace();
1826 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1828 * We don't need any atomic variables, just a barrier.
1829 * If an interrupt comes in, we don't care, because it would
1830 * have exited and put the counter back to what we want.
1831 * We just need a barrier to keep gcc from moving things
1832 * around.
1834 barrier();
1835 if (use_stack == 1) {
1836 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1837 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1840 save_stack_trace_regs(regs, &trace);
1842 save_stack_trace(&trace);
1844 if (trace.nr_entries > size)
1845 size = trace.nr_entries;
1847 /* From now on, use_stack is a boolean */
1850 size *= sizeof(unsigned long);
1852 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1853 sizeof(*entry) + size, flags, pc);
1856 entry = ring_buffer_event_data(event);
1858 memset(&entry->caller, 0, size);
1861 memcpy(&entry->caller, trace.entries,
1862 trace.nr_entries * sizeof(unsigned long));
1864 trace.max_entries = FTRACE_STACK_ENTRIES;
1865 trace.entries = entry->caller;
1867 save_stack_trace_regs(regs, &trace);
1869 save_stack_trace(&trace);
1872 entry->size = trace.nr_entries;
1874 if (!call_filter_check_discard(call, entry, buffer, event))
1875 __buffer_unlock_commit(buffer, event);
1878 /* Again, don't let gcc optimize things here */
1880 __this_cpu_dec(ftrace_stack_reserve);
1881 preempt_enable_notrace();
1885 static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
1887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
1890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1896 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1899 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1903 * trace_dump_stack - record a stack back trace in the trace buffer
1904 * @skip: Number of functions to skip (helper handlers)
1906 void trace_dump_stack(int skip)
1908 unsigned long flags;
1910 if (tracing_disabled || tracing_selftest_running)
1913 local_save_flags(flags);
1916 * Skip 3 more, seems to get us at the caller of
1917 * this function.
1919 skip += 3;
1920 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921 flags, skip, preempt_count(), NULL);
1924 static DEFINE_PER_CPU(int, user_stack_count);
1927 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1929 struct trace_event_call *call = &event_user_stack;
1930 struct ring_buffer_event *event;
1931 struct userstack_entry *entry;
1932 struct stack_trace trace;
1934 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1938 * NMIs cannot handle page faults, even with fixups.
1939 * Saving the user stack can (and often does) fault.
1941 if (unlikely(in_nmi()))
1945 * prevent recursion, since the user stack tracing may
1946 * trigger other kernel events.
1949 if (__this_cpu_read(user_stack_count))
1952 __this_cpu_inc(user_stack_count);
1954 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1955 sizeof(*entry), flags, pc);
1957 goto out_drop_count;
1958 entry = ring_buffer_event_data(event);
1960 entry->tgid = current->tgid;
1961 memset(&entry->caller, 0, sizeof(entry->caller));
1963 trace.nr_entries = 0;
1964 trace.max_entries = FTRACE_STACK_ENTRIES;
1966 trace.entries = entry->caller;
1968 save_stack_trace_user(&trace);
1969 if (!call_filter_check_discard(call, entry, buffer, event))
1970 __buffer_unlock_commit(buffer, event);
1973 __this_cpu_dec(user_stack_count);
1979 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1981 ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1985 #endif /* CONFIG_STACKTRACE */
1987 /* created for use with alloc_percpu */
1988 struct trace_buffer_struct {
1989 char buffer[TRACE_BUF_SIZE];
1992 static struct trace_buffer_struct *trace_percpu_buffer;
1993 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1998 * The buffer used is dependent on the context. There is a per cpu
1999 * buffer for normal context, softirq context, hard irq context and
2000 * for NMI context. This allows for lockless recording.
2002 * Note, if the buffers failed to be allocated, then this returns NULL.
2004 static char *get_trace_buf(void)
2006 struct trace_buffer_struct *percpu_buffer;
2009 * If we have allocated per cpu buffers, then we do not
2010 * need to do any locking.
2013 percpu_buffer = trace_percpu_nmi_buffer;
2015 percpu_buffer = trace_percpu_irq_buffer;
2016 else if (in_softirq())
2017 percpu_buffer = trace_percpu_sirq_buffer;
2019 percpu_buffer = trace_percpu_buffer;
2024 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2027 static int alloc_percpu_trace_buffer(void)
2029 struct trace_buffer_struct *buffers;
2030 struct trace_buffer_struct *sirq_buffers;
2031 struct trace_buffer_struct *irq_buffers;
2032 struct trace_buffer_struct *nmi_buffers;
2034 buffers = alloc_percpu(struct trace_buffer_struct);
2038 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2042 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2046 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2050 trace_percpu_buffer = buffers;
2051 trace_percpu_sirq_buffer = sirq_buffers;
2052 trace_percpu_irq_buffer = irq_buffers;
2053 trace_percpu_nmi_buffer = nmi_buffers;
2058 free_percpu(irq_buffers);
2060 free_percpu(sirq_buffers);
2062 free_percpu(buffers);
2064 WARN(1, "Could not allocate percpu trace_printk buffer");
2068 static int buffers_allocated;
2070 void trace_printk_init_buffers(void)
2072 if (buffers_allocated)
2075 if (alloc_percpu_trace_buffer())
2078 /* trace_printk() is for debug use only. Don't use it in production. */
2081 pr_warning("**********************************************************\n");
2082 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2087 pr_warning("** unsafe for production use. **\n");
2088 pr_warning("** **\n");
2089 pr_warning("** If you see this message and you are not debugging **\n");
2090 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2091 pr_warning("** **\n");
2092 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2093 pr_warning("**********************************************************\n");
2095 /* Expand the buffers to the set size */
2096 tracing_update_buffers();
2098 buffers_allocated = 1;
2101 * trace_printk_init_buffers() can be called by modules.
2102 * If that happens, then we need to start cmdline recording
2103 * directly here. If the global_trace.buffer is already
2104 * allocated here, then this was called by module code.
2106 if (global_trace.trace_buffer.buffer)
2107 tracing_start_cmdline_record();
2110 void trace_printk_start_comm(void)
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated)
2115 tracing_start_cmdline_record();
2118 static void trace_printk_start_stop_comm(int enabled)
2120 if (!buffers_allocated)
2124 tracing_start_cmdline_record();
2126 tracing_stop_cmdline_record();
2130 * trace_vbprintk - write binary msg to tracing buffer
2133 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2135 struct trace_event_call *call = &event_bprint;
2136 struct ring_buffer_event *event;
2137 struct ring_buffer *buffer;
2138 struct trace_array *tr = &global_trace;
2139 struct bprint_entry *entry;
2140 unsigned long flags;
2142 int len = 0, size, pc;
2144 if (unlikely(tracing_selftest_running || tracing_disabled))
2147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2150 pc = preempt_count();
2151 preempt_disable_notrace();
2153 tbuffer = get_trace_buf();
2159 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2161 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2164 local_save_flags(flags);
2165 size = sizeof(*entry) + sizeof(u32) * len;
2166 buffer = tr->trace_buffer.buffer;
2167 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2171 entry = ring_buffer_event_data(event);
2175 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2176 if (!call_filter_check_discard(call, entry, buffer, event)) {
2177 __buffer_unlock_commit(buffer, event);
2178 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2182 preempt_enable_notrace();
2183 unpause_graph_tracing();
2187 EXPORT_SYMBOL_GPL(trace_vbprintk);
2190 __trace_array_vprintk(struct ring_buffer *buffer,
2191 unsigned long ip, const char *fmt, va_list args)
2193 struct trace_event_call *call = &event_print;
2194 struct ring_buffer_event *event;
2195 int len = 0, size, pc;
2196 struct print_entry *entry;
2197 unsigned long flags;
2200 if (tracing_disabled || tracing_selftest_running)
2203 /* Don't pollute graph traces with trace_vprintk internals */
2204 pause_graph_tracing();
2206 pc = preempt_count();
2207 preempt_disable_notrace();
2210 tbuffer = get_trace_buf();
2216 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2218 local_save_flags(flags);
2219 size = sizeof(*entry) + len + 1;
2220 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2224 entry = ring_buffer_event_data(event);
2227 memcpy(&entry->buf, tbuffer, len + 1);
2228 if (!call_filter_check_discard(call, entry, buffer, event)) {
2229 __buffer_unlock_commit(buffer, event);
2230 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2233 preempt_enable_notrace();
2234 unpause_graph_tracing();
2239 int trace_array_vprintk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, va_list args)
2242 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2245 int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2260 int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2275 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2277 return trace_array_vprintk(&global_trace, ip, fmt, args);
2279 EXPORT_SYMBOL_GPL(trace_vprintk);
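/*
 * Note (illustrative): trace_printk() from <linux/kernel.h> ends up in
 * trace_vbprintk() or trace_vprintk() above. With a compile-time
 * constant format it records only a pointer to the format plus the
 * binary arguments (a bprint event), which is what keeps it cheap:
 *
 *	trace_printk("read returned %d\n", ret);
 */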
2281 static void trace_iterator_increment(struct trace_iterator *iter)
2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2287 ring_buffer_read(buf_iter, NULL);
2290 static struct trace_entry *
2291 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
2294 struct ring_buffer_event *event;
2295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2311 static struct trace_entry *
2312 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313 unsigned long *missing_events, u64 *ent_ts)
2315 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2316 struct trace_entry *ent, *next = NULL;
2317 unsigned long lost_events = 0, next_lost = 0;
2318 int cpu_file = iter->cpu_file;
2319 u64 next_ts = 0, ts;
2325 * If we are in a per_cpu trace file, don't bother iterating over
2326 * all cpus; peek directly.
2328 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2329 if (ring_buffer_empty_cpu(buffer, cpu_file))
2331 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2333 *ent_cpu = cpu_file;
2338 for_each_tracing_cpu(cpu) {
2340 if (ring_buffer_empty_cpu(buffer, cpu))
2343 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2346 * Pick the entry with the smallest timestamp:
2348 if (ent && (!next || ts < next_ts)) {
2352 next_lost = lost_events;
2353 next_size = iter->ent_size;
2357 iter->ent_size = next_size;
2360 *ent_cpu = next_cpu;
2366 *missing_events = next_lost;
2371 /* Find the next real entry, without updating the iterator itself */
2372 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373 int *ent_cpu, u64 *ent_ts)
2375 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2378 /* Find the next real entry, and increment the iterator to the next entry */
2379 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
2385 trace_iterator_increment(iter);
2387 return iter->ent ? iter : NULL;
2390 static void trace_consume(struct trace_iterator *iter)
2392 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2393 &iter->lost_events);
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
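/*
 * Sum up the entry and overrun counts of every per-cpu buffer, taking
 * skipped entries into account, for the "entries-in-buffer" headers.
 */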
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
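/*
 * When annotation is enabled, emit a one-time "buffer started" marker
 * the first time an entry from a given CPU shows up in the output, so
 * a reader can tell where that CPU's buffer begins after an overrun.
 */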
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
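/*
 * Return non-zero when every buffer being read (one CPU or all of
 * them) has been fully consumed.
 */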
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
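/*
 * Common open code for the "trace" and "snapshot" files: allocate the
 * iterator, copy the current tracer, and prepare a ring buffer
 * iterator for each CPU that will be read.
 */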
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static inline bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
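/*
 * The tracing_cpumask file controls which CPUs may record into the
 * trace buffers; reads format the current mask, writes parse a new one.
 */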
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
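/*
 * Apply a single option string, optionally prefixed with "no" to clear
 * it: first try the generic trace_options table, then fall back to the
 * current tracer's private flags.
 */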
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file write into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding the prefix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do_trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
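/*
 * seq_file iterator for the saved_cmdlines file: walk the pid-to-comm
 * map and skip slots that never recorded a command line.
 */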
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}

static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warning("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */

static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's entries to match @size_buf's, per cpu or for all cpus */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
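/*
 * Resize one CPU's ring buffer (or all of them), keeping the
 * max/snapshot buffer the same size as the main buffer whenever the
 * current tracer needs it.
 */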
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with a different size
			 * max buffer!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is compiled in but never used, the ring
 * buffers start at a minimum size. Once a user starts to use the
 * tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}
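/*
 * Switch @tr to the registered tracer named @buf: tear down the old
 * tracer, allocate or free the snapshot buffer as the new tracer
 * requires, then initialize and enable the new one.
 */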
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#ifdef CONFIG_TRACER_MAX_TRACE

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif
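/*
 * Open trace_pipe: set up an iterator that consumes events as they are
 * read instead of iterating over a stopped buffer, so tracing can
 * continue while the pipe is open.
 */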
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
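/*
 * Fill iter->seq with rendered trace lines until the page-sized seq
 * buffer is full, @rem bytes have been produced, or the trace runs dry.
 * Returns how many bytes of @rem are still wanted.
 */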
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
4968 tracing_entries_read(struct file *filp, char __user *ubuf,
4969 size_t cnt, loff_t *ppos)
4971 struct inode *inode = file_inode(filp);
4972 struct trace_array *tr = inode->i_private;
4973 int cpu = tracing_get_cpu(inode);
4978 mutex_lock(&trace_types_lock);
4980 if (cpu == RING_BUFFER_ALL_CPUS) {
4981 int cpu, buf_size_same;
4986 /* Check whether all per-cpu buffer sizes are the same. */
4987 for_each_tracing_cpu(cpu) {
4988 /* Fill in the size from the first enabled CPU. */
4990 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4991 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4997 if (buf_size_same) {
4998 if (!ring_buffer_expanded)
4999 r = sprintf(buf, "%lu (expanded: %lu)\n",
5001 trace_buf_size >> 10);
5003 r = sprintf(buf, "%lu\n", size >> 10);
5005 r = sprintf(buf, "X\n");
5007 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5009 mutex_unlock(&trace_types_lock);
5011 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5016 tracing_entries_write(struct file *filp, const char __user *ubuf,
5017 size_t cnt, loff_t *ppos)
5019 struct inode *inode = file_inode(filp);
5020 struct trace_array *tr = inode->i_private;
5024 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5028 /* must have at least 1 entry */
5032 /* value is in KB */
5034 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
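/*
 * Illustrative shell usage of the "buffer_size_kb" file served by the
 * two handlers above (a sketch; the tracefs mount point may differ):
 *
 *	cat buffer_size_kb		# e.g. "1408" or "7 (expanded: 1408)"
 *	echo 2048 > buffer_size_kb	# resize the buffer to 2048 KB per CPU
 *
 * Reading the top-level file prints "X" when the per-cpu sizes differ.
 */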
5044 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5045 size_t cnt, loff_t *ppos)
5047 struct trace_array *tr = filp->private_data;
5050 unsigned long size = 0, expanded_size = 0;
5052 mutex_lock(&trace_types_lock);
5053 for_each_tracing_cpu(cpu) {
5054 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5055 if (!ring_buffer_expanded)
5056 expanded_size += trace_buf_size >> 10;
5058 if (ring_buffer_expanded)
5059 r = sprintf(buf, "%lu\n", size);
5061 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5062 mutex_unlock(&trace_types_lock);
5064 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
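/*
 * Sample read of "buffer_total_size_kb" built by the loop above
 * (numbers illustrative): with four CPUs at 1408 KB each it prints
 * "5632", or "4 (expanded: 5632)" before the ring buffer has been
 * expanded to its full size.
 */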
5068 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5069 size_t cnt, loff_t *ppos)
5072 * There is no need to read what the user has written; this function
5073 * exists only so that "echo" into the file does not report an error.
5082 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5084 struct trace_array *tr = inode->i_private;
5086 /* Disable tracing? */
5087 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5088 tracer_tracing_off(tr);
5089 /* resize the ring buffer to 0 */
5090 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5092 trace_array_put(tr);
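/*
 * Illustrative usage of "free_buffer" (a sketch): writing anything,
 * e.g. "echo > free_buffer", always succeeds; it is the final close
 * of the file, handled above, that optionally stops tracing and
 * shrinks the ring buffer to zero.
 */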
5098 tracing_mark_write(struct file *filp, const char __user *ubuf,
5099 size_t cnt, loff_t *fpos)
5101 unsigned long addr = (unsigned long)ubuf;
5102 struct trace_array *tr = filp->private_data;
5103 struct ring_buffer_event *event;
5104 struct ring_buffer *buffer;
5105 struct print_entry *entry;
5106 unsigned long irq_flags;
5107 struct page *pages[2];
5117 if (tracing_disabled)
5120 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5123 if (cnt > TRACE_BUF_SIZE)
5124 cnt = TRACE_BUF_SIZE;
5127 * Userspace is injecting traces into the kernel trace buffer.
5128 * We want to be as non-intrusive as possible.
5129 * To do so, we do not want to allocate any special buffers
5130 * or take any locks, but instead write the userspace data
5131 * straight into the ring buffer.
5133 * First we need to pin the userspace buffer into memory,
5134 * which it most likely already is, because userspace just
5135 * referenced it. But there is no guarantee. By using get_user_pages_fast()
5136 * and kmap_atomic/kunmap_atomic() we can get access to the
5137 * pages directly. We then write the data directly into the
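/*
 * For illustration, a minimal userspace sketch of writing a marker
 * (hypothetical example, not part of this file; the mount point may
 * differ):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from userspace", 20);
 *
 * The string lands in the ring buffer as a print entry; as the code
 * below shows, a newline is appended if the write did not end in one.
 */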
5140 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5142 /* Check whether the write crosses a page boundary. */
5143 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5146 offset = addr & (PAGE_SIZE - 1);
5149 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5150 if (ret < nr_pages) {
5152 put_page(pages[ret]);
5157 for (i = 0; i < nr_pages; i++)
5158 map_page[i] = kmap_atomic(pages[i]);
5160 local_save_flags(irq_flags);
5161 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5162 buffer = tr->trace_buffer.buffer;
5163 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5164 irq_flags, preempt_count());
5166 /* Ring buffer disabled, return as if not open for write */
5171 entry = ring_buffer_event_data(event);
5172 entry->ip = _THIS_IP_;
5174 if (nr_pages == 2) {
5175 len = PAGE_SIZE - offset;
5176 memcpy(&entry->buf, map_page[0] + offset, len);
5177 memcpy(&entry->buf[len], map_page[1], cnt - len);
5179 memcpy(&entry->buf, map_page[0] + offset, cnt);
5181 if (entry->buf[cnt - 1] != '\n') {
5182 entry->buf[cnt] = '\n';
5183 entry->buf[cnt + 1] = '\0';
5185 entry->buf[cnt] = '\0';
5187 __buffer_unlock_commit(buffer, event);
5194 for (i = nr_pages - 1; i >= 0; i--) {
5195 kunmap_atomic(map_page[i]);
5202 static int tracing_clock_show(struct seq_file *m, void *v)
5204 struct trace_array *tr = m->private;
5207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5209 "%s%s%s%s", i ? " " : "",
5210 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5211 i == tr->clock_id ? "]" : "");
5217 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5221 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5222 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5225 if (i == ARRAY_SIZE(trace_clocks))
5228 mutex_lock(&trace_types_lock);
5232 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5235 * New clock may not be consistent with the previous clock.
5236 * Reset the buffer so that it doesn't have incomparable timestamps.
5238 tracing_reset_online_cpus(&tr->trace_buffer);
5240 #ifdef CONFIG_TRACER_MAX_TRACE
5241 if (tr->max_buffer.buffer)
5242 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5243 tracing_reset_online_cpus(&tr->max_buffer);
5246 mutex_unlock(&trace_types_lock);
5251 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5252 size_t cnt, loff_t *fpos)
5254 struct seq_file *m = filp->private_data;
5255 struct trace_array *tr = m->private;
5257 const char *clockstr;
5260 if (cnt >= sizeof(buf))
5263 if (copy_from_user(&buf, ubuf, cnt))
5268 clockstr = strstrip(buf);
5270 ret = tracing_set_clock(tr, clockstr);
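/*
 * Illustrative "trace_clock" usage (a sketch): reading the file lists
 * the available clocks with the current one bracketed, e.g.
 *
 *	[local] global counter uptime perf mono
 *
 * and "echo global > trace_clock" switches clocks, resetting the
 * buffers as noted in tracing_set_clock() above.
 */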
5279 static int tracing_clock_open(struct inode *inode, struct file *file)
5281 struct trace_array *tr = inode->i_private;
5284 if (tracing_disabled)
5287 if (trace_array_get(tr))
5290 ret = single_open(file, tracing_clock_show, inode->i_private);
5292 trace_array_put(tr);
5297 struct ftrace_buffer_info {
5298 struct trace_iterator iter;
5303 #ifdef CONFIG_TRACER_SNAPSHOT
5304 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5306 struct trace_array *tr = inode->i_private;
5307 struct trace_iterator *iter;
5311 if (trace_array_get(tr) < 0)
5314 if (file->f_mode & FMODE_READ) {
5315 iter = __tracing_open(inode, file, true);
5317 ret = PTR_ERR(iter);
5319 /* Writes still need the seq_file to hold the private data */
5321 m = kzalloc(sizeof(*m), GFP_KERNEL);
5324 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5332 iter->trace_buffer = &tr->max_buffer;
5333 iter->cpu_file = tracing_get_cpu(inode);
5335 file->private_data = m;
5339 trace_array_put(tr);
5345 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5348 struct seq_file *m = filp->private_data;
5349 struct trace_iterator *iter = m->private;
5350 struct trace_array *tr = iter->tr;
5354 ret = tracing_update_buffers();
5358 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5362 mutex_lock(&trace_types_lock);
5364 if (tr->current_trace->use_max_tr) {
5371 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5375 if (tr->allocated_snapshot)
5379 /* Only allow per-cpu swap if the ring buffer supports it */
5380 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5381 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5386 if (!tr->allocated_snapshot) {
5387 ret = alloc_snapshot(tr);
5391 local_irq_disable();
5392 /* Now, we're going to swap */
5393 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5394 update_max_tr(tr, current, smp_processor_id());
5396 update_max_tr_single(tr, current, iter->cpu_file);
5400 if (tr->allocated_snapshot) {
5401 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5402 tracing_reset_online_cpus(&tr->max_buffer);
5404 tracing_reset(&tr->max_buffer, iter->cpu_file);
5414 mutex_unlock(&trace_types_lock);
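/*
 * A sketch of the write semantics handled above, as documented in
 * Documentation/trace/ftrace.txt:
 *
 *	echo 0 > snapshot	: clears and frees the snapshot buffer
 *	echo 1 > snapshot	: allocates (if needed) and takes a snapshot
 *	echo 2 > snapshot	: clears the snapshot contents without freeing
 */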
5418 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5420 struct seq_file *m = file->private_data;
5423 ret = tracing_release(inode, file);
5425 if (file->f_mode & FMODE_READ)
5428 /* If write only, the seq_file is just a stub */
5436 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5437 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5438 size_t count, loff_t *ppos);
5439 static int tracing_buffers_release(struct inode *inode, struct file *file);
5440 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5441 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5443 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5445 struct ftrace_buffer_info *info;
5448 ret = tracing_buffers_open(inode, filp);
5452 info = filp->private_data;
5454 if (info->iter.trace->use_max_tr) {
5455 tracing_buffers_release(inode, filp);
5459 info->iter.snapshot = true;
5460 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5465 #endif /* CONFIG_TRACER_SNAPSHOT */
5468 static const struct file_operations tracing_thresh_fops = {
5469 .open = tracing_open_generic,
5470 .read = tracing_thresh_read,
5471 .write = tracing_thresh_write,
5472 .llseek = generic_file_llseek,
5475 #ifdef CONFIG_TRACER_MAX_TRACE
5476 static const struct file_operations tracing_max_lat_fops = {
5477 .open = tracing_open_generic,
5478 .read = tracing_max_lat_read,
5479 .write = tracing_max_lat_write,
5480 .llseek = generic_file_llseek,
5484 static const struct file_operations set_tracer_fops = {
5485 .open = tracing_open_generic,
5486 .read = tracing_set_trace_read,
5487 .write = tracing_set_trace_write,
5488 .llseek = generic_file_llseek,
5491 static const struct file_operations tracing_pipe_fops = {
5492 .open = tracing_open_pipe,
5493 .poll = tracing_poll_pipe,
5494 .read = tracing_read_pipe,
5495 .splice_read = tracing_splice_read_pipe,
5496 .release = tracing_release_pipe,
5497 .llseek = no_llseek,
5500 static const struct file_operations tracing_entries_fops = {
5501 .open = tracing_open_generic_tr,
5502 .read = tracing_entries_read,
5503 .write = tracing_entries_write,
5504 .llseek = generic_file_llseek,
5505 .release = tracing_release_generic_tr,
5508 static const struct file_operations tracing_total_entries_fops = {
5509 .open = tracing_open_generic_tr,
5510 .read = tracing_total_entries_read,
5511 .llseek = generic_file_llseek,
5512 .release = tracing_release_generic_tr,
5515 static const struct file_operations tracing_free_buffer_fops = {
5516 .open = tracing_open_generic_tr,
5517 .write = tracing_free_buffer_write,
5518 .release = tracing_free_buffer_release,
5521 static const struct file_operations tracing_mark_fops = {
5522 .open = tracing_open_generic_tr,
5523 .write = tracing_mark_write,
5524 .llseek = generic_file_llseek,
5525 .release = tracing_release_generic_tr,
5528 static const struct file_operations trace_clock_fops = {
5529 .open = tracing_clock_open,
5531 .llseek = seq_lseek,
5532 .release = tracing_single_release_tr,
5533 .write = tracing_clock_write,
5536 #ifdef CONFIG_TRACER_SNAPSHOT
5537 static const struct file_operations snapshot_fops = {
5538 .open = tracing_snapshot_open,
5540 .write = tracing_snapshot_write,
5541 .llseek = tracing_lseek,
5542 .release = tracing_snapshot_release,
5545 static const struct file_operations snapshot_raw_fops = {
5546 .open = snapshot_raw_open,
5547 .read = tracing_buffers_read,
5548 .release = tracing_buffers_release,
5549 .splice_read = tracing_buffers_splice_read,
5550 .llseek = no_llseek,
5553 #endif /* CONFIG_TRACER_SNAPSHOT */
5555 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5557 struct trace_array *tr = inode->i_private;
5558 struct ftrace_buffer_info *info;
5561 if (tracing_disabled)
5564 if (trace_array_get(tr) < 0)
5567 info = kzalloc(sizeof(*info), GFP_KERNEL);
5569 trace_array_put(tr);
5573 mutex_lock(&trace_types_lock);
5576 info->iter.cpu_file = tracing_get_cpu(inode);
5577 info->iter.trace = tr->current_trace;
5578 info->iter.trace_buffer = &tr->trace_buffer;
5580 /* Force reading ring buffer for first read */
5581 info->read = (unsigned int)-1;
5583 filp->private_data = info;
5585 tr->current_trace->ref++;
5587 mutex_unlock(&trace_types_lock);
5589 ret = nonseekable_open(inode, filp);
5591 trace_array_put(tr);
5597 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5599 struct ftrace_buffer_info *info = filp->private_data;
5600 struct trace_iterator *iter = &info->iter;
5602 return trace_poll(iter, filp, poll_table);
5606 tracing_buffers_read(struct file *filp, char __user *ubuf,
5607 size_t count, loff_t *ppos)
5609 struct ftrace_buffer_info *info = filp->private_data;
5610 struct trace_iterator *iter = &info->iter;
5617 #ifdef CONFIG_TRACER_MAX_TRACE
5618 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5623 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5628 /* Do we have previous read data to read? */
5629 if (info->read < PAGE_SIZE)
5633 trace_access_lock(iter->cpu_file);
5634 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5638 trace_access_unlock(iter->cpu_file);
5641 if (trace_empty(iter)) {
5642 if ((filp->f_flags & O_NONBLOCK))
5645 ret = wait_on_pipe(iter, false);
5656 size = PAGE_SIZE - info->read;
5660 ret = copy_to_user(ubuf, info->spare + info->read, size);
5672 static int tracing_buffers_release(struct inode *inode, struct file *file)
5674 struct ftrace_buffer_info *info = file->private_data;
5675 struct trace_iterator *iter = &info->iter;
5677 mutex_lock(&trace_types_lock);
5679 iter->tr->current_trace->ref--;
5681 __trace_array_put(iter->tr);
5684 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5687 mutex_unlock(&trace_types_lock);
5693 struct ring_buffer *buffer;
5698 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5699 struct pipe_buffer *buf)
5701 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5706 ring_buffer_free_read_page(ref->buffer, ref->page);
5711 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5712 struct pipe_buffer *buf)
5714 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5719 /* Pipe buffer operations for a buffer. */
5720 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5722 .confirm = generic_pipe_buf_confirm,
5723 .release = buffer_pipe_buf_release,
5724 .steal = generic_pipe_buf_steal,
5725 .get = buffer_pipe_buf_get,
5729 * Callback from splice_to_pipe(), used to release leftover pages
5730 * at the end of the spd if we errored out while filling the pipe.
5732 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5734 struct buffer_ref *ref =
5735 (struct buffer_ref *)spd->partial[i].private;
5740 ring_buffer_free_read_page(ref->buffer, ref->page);
5742 spd->partial[i].private = 0;
5746 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5747 struct pipe_inode_info *pipe, size_t len,
5750 struct ftrace_buffer_info *info = file->private_data;
5751 struct trace_iterator *iter = &info->iter;
5752 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5753 struct page *pages_def[PIPE_DEF_BUFFERS];
5754 struct splice_pipe_desc spd = {
5756 .partial = partial_def,
5757 .nr_pages_max = PIPE_DEF_BUFFERS,
5759 .ops = &buffer_pipe_buf_ops,
5760 .spd_release = buffer_spd_release,
5762 struct buffer_ref *ref;
5766 #ifdef CONFIG_TRACER_MAX_TRACE
5767 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5771 if (*ppos & (PAGE_SIZE - 1))
5774 if (len & (PAGE_SIZE - 1)) {
5775 if (len < PAGE_SIZE)
5780 if (splice_grow_spd(pipe, &spd))
5784 trace_access_lock(iter->cpu_file);
5785 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5787 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5791 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5798 ref->buffer = iter->trace_buffer->buffer;
5799 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5806 r = ring_buffer_read_page(ref->buffer, &ref->page,
5807 len, iter->cpu_file, 1);
5809 ring_buffer_free_read_page(ref->buffer, ref->page);
5814 page = virt_to_page(ref->page);
5816 spd.pages[i] = page;
5817 spd.partial[i].len = PAGE_SIZE;
5818 spd.partial[i].offset = 0;
5819 spd.partial[i].private = (unsigned long)ref;
5823 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5826 trace_access_unlock(iter->cpu_file);
5829 /* did we read anything? */
5830 if (!spd.nr_pages) {
5835 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5838 ret = wait_on_pipe(iter, true);
5845 ret = splice_to_pipe(pipe, &spd);
5847 splice_shrink_spd(&spd);
5852 static const struct file_operations tracing_buffers_fops = {
5853 .open = tracing_buffers_open,
5854 .read = tracing_buffers_read,
5855 .poll = tracing_buffers_poll,
5856 .release = tracing_buffers_release,
5857 .splice_read = tracing_buffers_splice_read,
5858 .llseek = no_llseek,
5862 tracing_stats_read(struct file *filp, char __user *ubuf,
5863 size_t count, loff_t *ppos)
5865 struct inode *inode = file_inode(filp);
5866 struct trace_array *tr = inode->i_private;
5867 struct trace_buffer *trace_buf = &tr->trace_buffer;
5868 int cpu = tracing_get_cpu(inode);
5869 struct trace_seq *s;
5871 unsigned long long t;
5872 unsigned long usec_rem;
5874 s = kmalloc(sizeof(*s), GFP_KERNEL);
5880 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5881 trace_seq_printf(s, "entries: %ld\n", cnt);
5883 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5884 trace_seq_printf(s, "overrun: %ld\n", cnt);
5886 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5887 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5889 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5890 trace_seq_printf(s, "bytes: %ld\n", cnt);
5892 if (trace_clocks[tr->clock_id].in_ns) {
5893 /* local or global for trace_clock */
5894 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5895 usec_rem = do_div(t, USEC_PER_SEC);
5896 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5899 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5900 usec_rem = do_div(t, USEC_PER_SEC);
5901 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5903 /* counter or tsc mode for trace_clock */
5904 trace_seq_printf(s, "oldest event ts: %llu\n",
5905 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5907 trace_seq_printf(s, "now ts: %llu\n",
5908 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5911 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5912 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5914 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5915 trace_seq_printf(s, "read events: %ld\n", cnt);
5917 count = simple_read_from_buffer(ubuf, count, ppos,
5918 s->buffer, trace_seq_used(s));
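/*
 * Sample per-cpu "stats" output assembled by the trace_seq calls
 * above (values illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5632
 *	oldest event ts:  2725.041822
 *	now ts:  2727.465447
 *	dropped events: 0
 *	read events: 129
 */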
5925 static const struct file_operations tracing_stats_fops = {
5926 .open = tracing_open_generic_tr,
5927 .read = tracing_stats_read,
5928 .llseek = generic_file_llseek,
5929 .release = tracing_release_generic_tr,
5932 #ifdef CONFIG_DYNAMIC_FTRACE
5934 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5940 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5941 size_t cnt, loff_t *ppos)
5943 static char ftrace_dyn_info_buffer[1024];
5944 static DEFINE_MUTEX(dyn_info_mutex);
5945 unsigned long *p = filp->private_data;
5946 char *buf = ftrace_dyn_info_buffer;
5947 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5950 mutex_lock(&dyn_info_mutex);
5951 r = sprintf(buf, "%ld ", *p);
5953 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5956 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5958 mutex_unlock(&dyn_info_mutex);
5963 static const struct file_operations tracing_dyn_info_fops = {
5964 .open = tracing_open_generic,
5965 .read = tracing_read_dyn_info,
5966 .llseek = generic_file_llseek,
5968 #endif /* CONFIG_DYNAMIC_FTRACE */
5970 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5972 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5978 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5980 unsigned long *count = (long *)data;
5992 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5993 struct ftrace_probe_ops *ops, void *data)
5995 long count = (long)data;
5997 seq_printf(m, "%ps:", (void *)ip);
5999 seq_puts(m, "snapshot");
6002 seq_puts(m, ":unlimited\n");
6004 seq_printf(m, ":count=%ld\n", count);
6009 static struct ftrace_probe_ops snapshot_probe_ops = {
6010 .func = ftrace_snapshot,
6011 .print = ftrace_snapshot_print,
6014 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6015 .func = ftrace_count_snapshot,
6016 .print = ftrace_snapshot_print,
6020 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6021 char *glob, char *cmd, char *param, int enable)
6023 struct ftrace_probe_ops *ops;
6024 void *count = (void *)-1;
6028 /* hash funcs only work with set_ftrace_filter */
6032 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6034 if (glob[0] == '!') {
6035 unregister_ftrace_function_probe_func(glob+1, ops);
6042 number = strsep(¶m, ":");
6044 if (!strlen(number))
6048 * We use the callback data field (which is a pointer) as our counter.
6051 ret = kstrtoul(number, 0, (unsigned long *)&count);
6056 ret = alloc_snapshot(&global_trace);
6060 ret = register_ftrace_function_probe(glob, ops, count);
6063 return ret < 0 ? ret : 0;
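/*
 * Illustrative usage of the "snapshot" function command parsed above
 * (via set_ftrace_filter, since hash funcs only work there):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:3' > set_ftrace_filter
 *	echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The optional count limits how many snapshots are taken; a leading
 * '!' unregisters the probe.
 */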
6066 static struct ftrace_func_command ftrace_snapshot_cmd = {
6068 .func = ftrace_trace_snapshot_callback,
6071 static __init int register_snapshot_cmd(void)
6073 return register_ftrace_command(&ftrace_snapshot_cmd);
6076 static inline __init int register_snapshot_cmd(void) { return 0; }
6077 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6079 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6081 if (WARN_ON(!tr->dir))
6082 return ERR_PTR(-ENODEV);
6084 /* Top directory uses NULL as the parent */
6085 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6088 /* All sub buffers have a descriptor */
6092 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6094 struct dentry *d_tracer;
6097 return tr->percpu_dir;
6099 d_tracer = tracing_get_dentry(tr);
6100 if (IS_ERR(d_tracer))
6103 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6105 WARN_ONCE(!tr->percpu_dir,
6106 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6108 return tr->percpu_dir;
6111 static struct dentry *
6112 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6113 void *data, long cpu, const struct file_operations *fops)
6115 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6117 if (ret) /* See tracing_get_cpu() */
6118 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6123 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6125 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6126 struct dentry *d_cpu;
6127 char cpu_dir[30]; /* 30 characters should be more than enough */
6132 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6133 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6135 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6139 /* per cpu trace_pipe */
6140 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6141 tr, cpu, &tracing_pipe_fops);
6144 trace_create_cpu_file("trace", 0644, d_cpu,
6145 tr, cpu, &tracing_fops);
6147 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6148 tr, cpu, &tracing_buffers_fops);
6150 trace_create_cpu_file("stats", 0444, d_cpu,
6151 tr, cpu, &tracing_stats_fops);
6153 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6154 tr, cpu, &tracing_entries_fops);
6156 #ifdef CONFIG_TRACER_SNAPSHOT
6157 trace_create_cpu_file("snapshot", 0644, d_cpu,
6158 tr, cpu, &snapshot_fops);
6160 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6161 tr, cpu, &snapshot_raw_fops);
6165 #ifdef CONFIG_FTRACE_SELFTEST
6166 /* Let selftest have access to static functions in this file */
6167 #include "trace_selftest.c"
6171 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6174 struct trace_option_dentry *topt = filp->private_data;
6177 if (topt->flags->val & topt->opt->bit)
6182 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6186 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6189 struct trace_option_dentry *topt = filp->private_data;
6193 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6197 if (val != 0 && val != 1)
6200 if (!!(topt->flags->val & topt->opt->bit) != val) {
6201 mutex_lock(&trace_types_lock);
6202 ret = __set_tracer_option(topt->tr, topt->flags,
6204 mutex_unlock(&trace_types_lock);
6215 static const struct file_operations trace_options_fops = {
6216 .open = tracing_open_generic,
6217 .read = trace_options_read,
6218 .write = trace_options_write,
6219 .llseek = generic_file_llseek,
6223 * In order to pass in both the trace_array descriptor as well as the index
6224 * to the flag that the trace option file represents, the trace_array
6225 * has a character array of trace_flags_index[], which holds the index
6226 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6227 * The address of this character array is passed to the flag option file
6228 * read/write callbacks.
6230 * In order to extract both the index and the trace_array descriptor,
6231 * get_tr_index() uses the following algorithm.
6235 * First, idx = *ptr yields the index, as the pointer points directly
6238 * at the index byte (remember index[1] == 1). Then, subtracting that
6239 * index from the pointer gets us back to the start of the array:
6241 * ptr - idx == &index[0]
6243 * Then a simple container_of() from that pointer gets us to the
6244 * trace_array descriptor.
6246 static void get_tr_index(void *data, struct trace_array **ptr,
6247 unsigned int *pindex)
6249 *pindex = *(unsigned char *)data;
6251 *ptr = container_of(data - *pindex, struct trace_array,
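/*
 * A worked example of the recovery above (illustrative values): if
 * data points at &tr->trace_flags_index[3], then *pindex == 3 and
 * data - 3 == &tr->trace_flags_index[0], so container_of() on the
 * trace_flags_index member yields the enclosing trace_array.
 */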
6256 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6259 void *tr_index = filp->private_data;
6260 struct trace_array *tr;
6264 get_tr_index(tr_index, &tr, &index);
6266 if (tr->trace_flags & (1 << index))
6271 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6275 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6278 void *tr_index = filp->private_data;
6279 struct trace_array *tr;
6284 get_tr_index(tr_index, &tr, &index);
6286 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6290 if (val != 0 && val != 1)
6293 mutex_lock(&trace_types_lock);
6294 ret = set_tracer_flag(tr, 1 << index, val);
6295 mutex_unlock(&trace_types_lock);
6305 static const struct file_operations trace_options_core_fops = {
6306 .open = tracing_open_generic,
6307 .read = trace_options_core_read,
6308 .write = trace_options_core_write,
6309 .llseek = generic_file_llseek,
6312 struct dentry *trace_create_file(const char *name,
6314 struct dentry *parent,
6316 const struct file_operations *fops)
6320 ret = tracefs_create_file(name, mode, parent, data, fops);
6322 pr_warning("Could not create tracefs '%s' entry\n", name);
6328 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6330 struct dentry *d_tracer;
6335 d_tracer = tracing_get_dentry(tr);
6336 if (IS_ERR(d_tracer))
6339 tr->options = tracefs_create_dir("options", d_tracer);
6341 pr_warning("Could not create tracefs directory 'options'\n");
6349 create_trace_option_file(struct trace_array *tr,
6350 struct trace_option_dentry *topt,
6351 struct tracer_flags *flags,
6352 struct tracer_opt *opt)
6354 struct dentry *t_options;
6356 t_options = trace_options_init_dentry(tr);
6360 topt->flags = flags;
6364 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6365 &trace_options_fops);
6370 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6372 struct trace_option_dentry *topts;
6373 struct trace_options *tr_topts;
6374 struct tracer_flags *flags;
6375 struct tracer_opt *opts;
6382 flags = tracer->flags;
6384 if (!flags || !flags->opts)
6388 * If this is an instance, only create flags for tracers
6389 * the instance may have.
6391 if (!trace_ok_for_array(tracer, tr))
6394 for (i = 0; i < tr->nr_topts; i++) {
6396 * Check if these flags have already been added.
6397 * Some tracers share flags.
6399 if (tr->topts[i].tracer->flags == tracer->flags)
6405 for (cnt = 0; opts[cnt].name; cnt++)
6408 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6412 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6419 tr->topts = tr_topts;
6420 tr->topts[tr->nr_topts].tracer = tracer;
6421 tr->topts[tr->nr_topts].topts = topts;
6424 for (cnt = 0; opts[cnt].name; cnt++) {
6425 create_trace_option_file(tr, &topts[cnt], flags,
6427 WARN_ONCE(topts[cnt].entry == NULL,
6428 "Failed to create trace option: %s",
6433 static struct dentry *
6434 create_trace_option_core_file(struct trace_array *tr,
6435 const char *option, long index)
6437 struct dentry *t_options;
6439 t_options = trace_options_init_dentry(tr);
6443 return trace_create_file(option, 0644, t_options,
6444 (void *)&tr->trace_flags_index[index],
6445 &trace_options_core_fops);
6448 static void create_trace_options_dir(struct trace_array *tr)
6450 struct dentry *t_options;
6451 bool top_level = tr == &global_trace;
6454 t_options = trace_options_init_dentry(tr);
6458 for (i = 0; trace_options[i]; i++) {
6460 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6461 create_trace_option_core_file(tr, trace_options[i], i);
6466 rb_simple_read(struct file *filp, char __user *ubuf,
6467 size_t cnt, loff_t *ppos)
6469 struct trace_array *tr = filp->private_data;
6473 r = tracer_tracing_is_on(tr);
6474 r = sprintf(buf, "%d\n", r);
6476 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6480 rb_simple_write(struct file *filp, const char __user *ubuf,
6481 size_t cnt, loff_t *ppos)
6483 struct trace_array *tr = filp->private_data;
6484 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6488 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6493 mutex_lock(&trace_types_lock);
6495 tracer_tracing_on(tr);
6496 if (tr->current_trace->start)
6497 tr->current_trace->start(tr);
6499 tracer_tracing_off(tr);
6500 if (tr->current_trace->stop)
6501 tr->current_trace->stop(tr);
6503 mutex_unlock(&trace_types_lock);
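/*
 * Illustrative "tracing_on" usage matching the handlers above:
 *
 *	echo 0 > tracing_on	# stop recording, call the tracer's ->stop()
 *	echo 1 > tracing_on	# resume recording, call the tracer's ->start()
 *	cat tracing_on		# prints 0 or 1
 */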
6511 static const struct file_operations rb_simple_fops = {
6512 .open = tracing_open_generic_tr,
6513 .read = rb_simple_read,
6514 .write = rb_simple_write,
6515 .release = tracing_release_generic_tr,
6516 .llseek = default_llseek,
6519 struct dentry *trace_instance_dir;
6522 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6525 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6527 enum ring_buffer_flags rb_flags;
6529 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6533 buf->buffer = ring_buffer_alloc(size, rb_flags);
6537 buf->data = alloc_percpu(struct trace_array_cpu);
6539 ring_buffer_free(buf->buffer);
6544 /* Allocate the first page for all buffers */
6545 set_buffer_entries(&tr->trace_buffer,
6546 ring_buffer_size(tr->trace_buffer.buffer, 0));
6551 static int allocate_trace_buffers(struct trace_array *tr, int size)
6555 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6559 #ifdef CONFIG_TRACER_MAX_TRACE
6560 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6561 allocate_snapshot ? size : 1);
6563 ring_buffer_free(tr->trace_buffer.buffer);
6564 tr->trace_buffer.buffer = NULL;
6565 free_percpu(tr->trace_buffer.data);
6566 tr->trace_buffer.data = NULL;
6569 tr->allocated_snapshot = allocate_snapshot;
6572 * Only the top level trace array gets its snapshot allocated
6573 * from the kernel command line.
6575 allocate_snapshot = false;
6580 static void free_trace_buffer(struct trace_buffer *buf)
6583 ring_buffer_free(buf->buffer);
6585 free_percpu(buf->data);
6590 static void free_trace_buffers(struct trace_array *tr)
6595 free_trace_buffer(&tr->trace_buffer);
6597 #ifdef CONFIG_TRACER_MAX_TRACE
6598 free_trace_buffer(&tr->max_buffer);
6602 static void init_trace_flags_index(struct trace_array *tr)
6606 /* Used by the trace options files */
6607 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6608 tr->trace_flags_index[i] = i;
6611 static void __update_tracer_options(struct trace_array *tr)
6615 for (t = trace_types; t; t = t->next)
6616 add_tracer_options(tr, t);
6619 static void update_tracer_options(struct trace_array *tr)
6621 mutex_lock(&trace_types_lock);
6622 __update_tracer_options(tr);
6623 mutex_unlock(&trace_types_lock);
6626 static int instance_mkdir(const char *name)
6628 struct trace_array *tr;
6631 mutex_lock(&trace_types_lock);
6634 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6635 if (tr->name && strcmp(tr->name, name) == 0)
6640 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6644 tr->name = kstrdup(name, GFP_KERNEL);
6648 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6651 tr->trace_flags = global_trace.trace_flags;
6653 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6655 raw_spin_lock_init(&tr->start_lock);
6657 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6659 tr->current_trace = &nop_trace;
6661 INIT_LIST_HEAD(&tr->systems);
6662 INIT_LIST_HEAD(&tr->events);
6664 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6667 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6671 ret = event_trace_add_tracer(tr->dir, tr);
6673 tracefs_remove_recursive(tr->dir);
6677 init_tracer_tracefs(tr, tr->dir);
6678 init_trace_flags_index(tr);
6679 __update_tracer_options(tr);
6681 list_add(&tr->list, &ftrace_trace_arrays);
6683 mutex_unlock(&trace_types_lock);
6688 free_trace_buffers(tr);
6689 free_cpumask_var(tr->tracing_cpumask);
6694 mutex_unlock(&trace_types_lock);
6700 static int instance_rmdir(const char *name)
6702 struct trace_array *tr;
6707 mutex_lock(&trace_types_lock);
6710 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6711 if (tr->name && strcmp(tr->name, name) == 0) {
6720 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6723 list_del(&tr->list);
6725 tracing_set_nop(tr);
6726 event_trace_del_tracer(tr);
6727 ftrace_destroy_function_files(tr);
6728 tracefs_remove_recursive(tr->dir);
6729 free_trace_buffers(tr);
6731 for (i = 0; i < tr->nr_topts; i++) {
6732 kfree(tr->topts[i].topts);
6736 free_cpumask_var(tr->tracing_cpumask);
6743 mutex_unlock(&trace_types_lock);
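/*
 * Instances are driven from userspace with plain mkdir/rmdir inside
 * the tracefs "instances" directory (illustrative; the mount point
 * may differ):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * The rmdir fails while the instance still holds references.
 */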
6748 static __init void create_trace_instances(struct dentry *d_tracer)
6750 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6753 if (WARN_ON(!trace_instance_dir))
6758 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6762 trace_create_file("available_tracers", 0444, d_tracer,
6763 tr, &show_traces_fops);
6765 trace_create_file("current_tracer", 0644, d_tracer,
6766 tr, &set_tracer_fops);
6768 trace_create_file("tracing_cpumask", 0644, d_tracer,
6769 tr, &tracing_cpumask_fops);
6771 trace_create_file("trace_options", 0644, d_tracer,
6772 tr, &tracing_iter_fops);
6774 trace_create_file("trace", 0644, d_tracer,
6777 trace_create_file("trace_pipe", 0444, d_tracer,
6778 tr, &tracing_pipe_fops);
6780 trace_create_file("buffer_size_kb", 0644, d_tracer,
6781 tr, &tracing_entries_fops);
6783 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6784 tr, &tracing_total_entries_fops);
6786 trace_create_file("free_buffer", 0200, d_tracer,
6787 tr, &tracing_free_buffer_fops);
6789 trace_create_file("trace_marker", 0220, d_tracer,
6790 tr, &tracing_mark_fops);
6792 trace_create_file("trace_clock", 0644, d_tracer, tr,
6795 trace_create_file("tracing_on", 0644, d_tracer,
6796 tr, &rb_simple_fops);
6798 create_trace_options_dir(tr);
6800 #ifdef CONFIG_TRACER_MAX_TRACE
6801 trace_create_file("tracing_max_latency", 0644, d_tracer,
6802 &tr->max_latency, &tracing_max_lat_fops);
6805 if (ftrace_create_function_files(tr, d_tracer))
6806 WARN(1, "Could not allocate function filter files");
6808 #ifdef CONFIG_TRACER_SNAPSHOT
6809 trace_create_file("snapshot", 0644, d_tracer,
6810 tr, &snapshot_fops);
6813 for_each_tracing_cpu(cpu)
6814 tracing_init_tracefs_percpu(tr, cpu);
6818 static struct vfsmount *trace_automount(void *ignore)
6820 struct vfsmount *mnt;
6821 struct file_system_type *type;
6824 * To maintain backward compatibility for tools that mount
6825 * debugfs to get to the tracing facility, tracefs is automatically
6826 * mounted to the debugfs/tracing directory.
6828 type = get_fs_type("tracefs");
6831 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6832 put_filesystem(type);
6841 * tracing_init_dentry - initialize top level trace array
6843 * This is called when creating files or directories in the tracing
6844 * directory. It is called via fs_initcall() by any of the boot up code
6845 * and expects to return the dentry of the top level tracing directory.
6847 struct dentry *tracing_init_dentry(void)
6849 struct trace_array *tr = &global_trace;
6851 /* The top level trace array uses NULL as parent */
6855 if (WARN_ON(!tracefs_initialized()) ||
6856 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6857 WARN_ON(!debugfs_initialized())))
6858 return ERR_PTR(-ENODEV);
6861 * As there may still be users that expect the tracing
6862 * files to exist in debugfs/tracing, we must automount
6863 * the tracefs file system there, so older tools still
6864 * work with the newer kernel.
6866 tr->dir = debugfs_create_automount("tracing", NULL,
6867 trace_automount, NULL);
6869 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6870 return ERR_PTR(-ENOMEM);
6876 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6877 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6879 static void __init trace_enum_init(void)
6883 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6884 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6887 #ifdef CONFIG_MODULES
6888 static void trace_module_add_enums(struct module *mod)
6890 if (!mod->num_trace_enums)
6894 * Modules with bad taint do not have events created, do
6895 * not bother with enums either.
6897 if (trace_module_has_bad_taint(mod))
6900 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6903 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6904 static void trace_module_remove_enums(struct module *mod)
6906 union trace_enum_map_item *map;
6907 union trace_enum_map_item **last = &trace_enum_maps;
6909 if (!mod->num_trace_enums)
6912 mutex_lock(&trace_enum_mutex);
6914 map = trace_enum_maps;
6917 if (map->head.mod == mod)
6919 map = trace_enum_jmp_to_tail(map);
6920 last = &map->tail.next;
6921 map = map->tail.next;
6926 *last = trace_enum_jmp_to_tail(map)->tail.next;
6929 mutex_unlock(&trace_enum_mutex);
6932 static inline void trace_module_remove_enums(struct module *mod) { }
6933 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6935 static int trace_module_notify(struct notifier_block *self,
6936 unsigned long val, void *data)
6938 struct module *mod = data;
6941 case MODULE_STATE_COMING:
6942 trace_module_add_enums(mod);
6944 case MODULE_STATE_GOING:
6945 trace_module_remove_enums(mod);
6952 static struct notifier_block trace_module_nb = {
6953 .notifier_call = trace_module_notify,
6956 #endif /* CONFIG_MODULES */
6958 static __init int tracer_init_tracefs(void)
6960 struct dentry *d_tracer;
6962 trace_access_lock_init();
6964 d_tracer = tracing_init_dentry();
6965 if (IS_ERR(d_tracer))
6968 init_tracer_tracefs(&global_trace, d_tracer);
6970 trace_create_file("tracing_thresh", 0644, d_tracer,
6971 &global_trace, &tracing_thresh_fops);
6973 trace_create_file("README", 0444, d_tracer,
6974 NULL, &tracing_readme_fops);
6976 trace_create_file("saved_cmdlines", 0444, d_tracer,
6977 NULL, &tracing_saved_cmdlines_fops);
6979 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6980 NULL, &tracing_saved_cmdlines_size_fops);
6984 trace_create_enum_file(d_tracer);
6986 #ifdef CONFIG_MODULES
6987 register_module_notifier(&trace_module_nb);
6990 #ifdef CONFIG_DYNAMIC_FTRACE
6991 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6992 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6995 create_trace_instances(d_tracer);
6997 update_tracer_options(&global_trace);
7002 static int trace_panic_handler(struct notifier_block *this,
7003 unsigned long event, void *unused)
7005 if (ftrace_dump_on_oops)
7006 ftrace_dump(ftrace_dump_on_oops);
7010 static struct notifier_block trace_panic_notifier = {
7011 .notifier_call = trace_panic_handler,
7013 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7016 static int trace_die_handler(struct notifier_block *self,
7022 if (ftrace_dump_on_oops)
7023 ftrace_dump(ftrace_dump_on_oops);
7031 static struct notifier_block trace_die_notifier = {
7032 .notifier_call = trace_die_handler,
7037 * printk is capped at 1024 bytes; we really don't need it that big.
7038 * Nothing should be printing 1000 characters anyway.
7040 #define TRACE_MAX_PRINT 1000
7043 * Define here KERN_TRACE so that we have one place to modify
7044 * it if we decide to change what log level the ftrace dump
7047 #define KERN_TRACE KERN_EMERG
7050 trace_printk_seq(struct trace_seq *s)
7052 /* Probably should print a warning here. */
7053 if (s->seq.len >= TRACE_MAX_PRINT)
7054 s->seq.len = TRACE_MAX_PRINT;
7057 * More paranoid code. Although the buffer size is set to
7058 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7059 * an extra layer of protection.
7061 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7062 s->seq.len = s->seq.size - 1;
7064 /* Should be zero-terminated, but we are paranoid. */
7065 s->buffer[s->seq.len] = 0;
7067 printk(KERN_TRACE "%s", s->buffer);
7072 void trace_init_global_iter(struct trace_iterator *iter)
7074 iter->tr = &global_trace;
7075 iter->trace = iter->tr->current_trace;
7076 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7077 iter->trace_buffer = &global_trace.trace_buffer;
7079 if (iter->trace && iter->trace->open)
7080 iter->trace->open(iter);
7082 /* Annotate start of buffers if we had overruns */
7083 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7084 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7086 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7087 if (trace_clocks[iter->tr->clock_id].in_ns)
7088 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7091 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7093 /* use static because iter can be a bit big for the stack */
7094 static struct trace_iterator iter;
7095 static atomic_t dump_running;
7096 struct trace_array *tr = &global_trace;
7097 unsigned int old_userobj;
7098 unsigned long flags;
7101 /* Only allow one dump user at a time. */
7102 if (atomic_inc_return(&dump_running) != 1) {
7103 atomic_dec(&dump_running);
7108 * Always turn off tracing when we dump.
7109 * We don't need to show trace output of what happens
7110 * between multiple crashes.
7112 * If the user does a sysrq-z, then they can re-enable
7113 * tracing with echo 1 > tracing_on.
7117 local_irq_save(flags);
7119 /* Simulate the iterator */
7120 trace_init_global_iter(&iter);
7122 for_each_tracing_cpu(cpu) {
7123 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7126 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7128 /* don't look at user memory in panic mode */
7129 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7131 switch (oops_dump_mode) {
7133 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7136 iter.cpu_file = raw_smp_processor_id();
7141 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7142 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7145 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7147 /* Did function tracer already get disabled? */
7148 if (ftrace_is_dead()) {
7149 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7150 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7154 * We need to stop all tracing on all CPUs to read
7155 * the next buffer. This is a bit expensive, but is
7156 * not done often. We fill all that we can read,
7157 * and then release the locks again.
7160 while (!trace_empty(&iter)) {
7163 printk(KERN_TRACE "---------------------------------\n");
7167 /* reset all but tr, trace, and overruns */
7168 memset(&iter.seq, 0,
7169 sizeof(struct trace_iterator) -
7170 offsetof(struct trace_iterator, seq));
7171 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7174 if (trace_find_next_entry_inc(&iter) != NULL) {
7177 ret = print_trace_line(&iter);
7178 if (ret != TRACE_TYPE_NO_CONSUME)
7179 trace_consume(&iter);
7181 touch_nmi_watchdog();
7183 trace_printk_seq(&iter.seq);
7187 printk(KERN_TRACE " (ftrace buffer empty)\n");
7189 printk(KERN_TRACE "---------------------------------\n");
7192 tr->trace_flags |= old_userobj;
7194 for_each_tracing_cpu(cpu) {
7195 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7197 atomic_dec(&dump_running);
7198 local_irq_restore(flags);
7200 EXPORT_SYMBOL_GPL(ftrace_dump);
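/*
 * As ftrace_dump() is exported, other kernel code can spill the
 * buffers on a fatal error; a minimal sketch:
 *
 *	if (fatal_error)
 *		ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG instead dumps only the CPU that hit the problem, per the
 * oops_dump_mode switch above.
 */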
7202 __init static int tracer_alloc_buffers(void)
7208 * Make sure we don't accidentally add more trace options
7209 * than we have bits for.
7211 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7213 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7216 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7217 goto out_free_buffer_mask;
7219 /* Only allocate trace_printk buffers if a trace_printk exists */
7220 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7221 /* Must be called before global_trace.buffer is allocated */
7222 trace_printk_init_buffers();
7224 /* To save memory, keep the ring buffer size to its minimum */
7225 if (ring_buffer_expanded)
7226 ring_buf_size = trace_buf_size;
7230 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7231 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7233 raw_spin_lock_init(&global_trace.start_lock);
7235 /* Used for event triggers */
7236 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7238 goto out_free_cpumask;
7240 if (trace_create_savedcmd() < 0)
7241 goto out_free_temp_buffer;
7243 /* TODO: make the number of buffers hot pluggable with CPUS */
7244 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7245 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7247 goto out_free_savedcmd;
7250 if (global_trace.buffer_disabled)
7253 if (trace_boot_clock) {
7254 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7256 pr_warning("Trace clock %s not defined, going back to default\n",
7261 * register_tracer() might reference current_trace, so it
7262 * needs to be set before we register anything. This is
7263 * just a bootstrap of current_trace anyway.
7265 global_trace.current_trace = &nop_trace;
7267 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7269 ftrace_init_global_array_ops(&global_trace);
7271 init_trace_flags_index(&global_trace);
7273 register_tracer(&nop_trace);
7275 /* All seems OK, enable tracing */
7276 tracing_disabled = 0;
7278 atomic_notifier_chain_register(&panic_notifier_list,
7279 &trace_panic_notifier);
7281 register_die_notifier(&trace_die_notifier);
7283 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7285 INIT_LIST_HEAD(&global_trace.systems);
7286 INIT_LIST_HEAD(&global_trace.events);
7287 list_add(&global_trace.list, &ftrace_trace_arrays);
7289 apply_trace_boot_options();
7291 register_snapshot_cmd();
7296 free_saved_cmdlines_buffer(savedcmd);
7297 out_free_temp_buffer:
7298 ring_buffer_free(temp_buffer);
7300 free_cpumask_var(global_trace.tracing_cpumask);
7301 out_free_buffer_mask:
7302 free_cpumask_var(tracing_buffer_mask);
7307 void __init trace_init(void)
7309 if (tracepoint_printk) {
7310 tracepoint_print_iter =
7311 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7312 if (WARN_ON(!tracepoint_print_iter))
7313 tracepoint_printk = 0;
7315 tracer_alloc_buffers();
7319 __init static int clear_boot_tracer(void)
7322 * The name of the default bootup tracer lives in an init section.
7323 * This function is called at late_initcall time; if we did not
7324 * find the boot tracer by then, clear the pointer to prevent
7325 * later registration from accessing the buffer that is
7326 * about to be freed.
7328 if (!default_bootup_tracer)
7331 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7332 default_bootup_tracer);
7333 default_bootup_tracer = NULL;
7338 fs_initcall(tracer_init_tracefs);
7339 late_initcall(clear_boot_tracer);