/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, but will be set to zero if the initialization
 * of the tracer is successful. That is the only place that sets
 * it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly     tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
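
/*
 * Example usage (a sketch based on the modes described above; these
 * are the values handled by set_ftrace_dump_on_oops()):
 *
 *   ftrace_dump_on_oops            - boot option: dump all CPU buffers
 *   ftrace_dump_on_oops=orig_cpu   - dump only the CPU that oopsed
 *
 * or at run time:
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */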

static int __init stop_trace_on_warning(char *str)
{
        __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
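
/*
 * Example (sketch): booting with "alloc_snapshot" on the kernel
 * command line pre-allocates the snapshot buffer, so that
 * tracing_snapshot() below can be used without a separate
 * allocation step.
 */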


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);
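
/*
 * Example (sketch; the option names come from the trace_options[]
 * table further down): booting with
 *
 *   trace_options=stacktrace,sym-addr
 *
 * applies those flags once the tracer is set up, much as if they had
 * been echoed into /sys/kernel/debug/tracing/trace_options.
 */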


unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
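
/*
 * Worked example: adding 500 before the divide rounds to the nearest
 * microsecond, so ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */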

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}

int filter_current_check_discard(struct ring_buffer *buffer,
                                 struct ftrace_event_call *call, void *rec,
                                 struct ring_buffer_event *event)
{
        return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!global_trace.trace_buffer.buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
        ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

        return ts;
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows whether the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so it can be used in fast paths such
 * as for the irqsoff tracer. But it may be inaccurate due to races.
 * If you need to know the accurate state, use tracing_is_on() which
 * is a little slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low value of 16384.
 * If a dump on oops happens, it is much appreciated not to
 * have to wait for all that output. Anyway, this is both boot
 * time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek(), etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
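
/*
 * Typical usage sketch for the primitives above (hypothetical reader
 * path, not a function in this file):
 *
 *      trace_access_lock(cpu);
 *      ... consume events from that cpu's ring buffer ...
 *      trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock
 * exclusively, blocking all per-cpu readers.
 */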

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
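
/*
 * A direct call would look like the sketch below; most callers are
 * expected to go through the trace_puts() macro instead, which picks
 * __trace_bputs() for compile-time constant strings and falls back
 * to this helper otherwise:
 *
 *      __trace_puts(_THIS_IP_, "hello tracing\n", 14);
 */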

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->str                      = str;

        __buffer_unlock_commit(buffer, event);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
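
/*
 * Usage sketch (assuming the snapshot buffer was allocated first,
 * e.g. via "echo 1 > /sys/kernel/debug/tracing/snapshot" or the
 * alloc_snapshot boot option):
 *
 *      if (condition_of_interest)      // hypothetical trigger
 *              tracing_snapshot();     // preserve the trace so far;
 *                                      // live tracing continues
 */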

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) that
         * we want to preserve.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        if (WARN_ON(ret < 0))
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
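
/*
 * tracing_on()/tracing_off() are exported so modules can bracket a
 * region of interest; a common debugging pattern (hypothetical caller
 * code) is:
 *
 *      tracing_on();
 *      ... suspect code path runs and is recorded ...
 *      tracing_off();  // freeze the buffer for post-mortem reading
 */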

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to check whether its ring buffer is enabled
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries cannot be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
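
/*
 * Example (sketch): since memparse() accepts K/M/G suffixes, booting
 * with either of
 *
 *   trace_buf_size=1441792
 *   trace_buf_size=1408k
 *
 * requests the same (per-cpu, page-rounded) buffer size.
 */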

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
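
/*
 * Example: booting with "tracing_thresh=100" sets the threshold to
 * 100 usecs; the value is stored in nanoseconds, hence the * 1000
 * above.
 */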

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
        { trace_clock_jiffies,  "uptime",       1 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
};
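
/*
 * The clock used for the trace can be switched at run time by name,
 * e.g.:
 *
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */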

/*
 * trace_parser_get_init - gets the buffer for the trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for the trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
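
/*
 * Example of the parser semantics (sketch): a single write of
 * "func_a func_b\n" is consumed over successive calls; the first
 * call returns with parser->buffer holding "func_a", the next with
 * "func_b", and parser->cont stays false because each token was
 * terminated by whitespace.
 */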

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr(), so
 * it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly     tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (This way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace.)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;

        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break them. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
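
/*
 * Minimal registration sketch (hypothetical tracer; the callback names
 * are assumptions for illustration, the fields come from struct tracer
 * in trace.h):
 *
 *      static struct tracer example_trace __read_mostly = {
 *              .name   = "example",
 *              .init   = example_trace_init,
 *              .reset  = example_trace_reset,
 *      };
 *
 *      static __init int init_example_trace(void)
 *      {
 *              return register_tracer(&example_trace);
 *      }
 *      device_initcall(init_example_trace);
 */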

void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = ftrace_now(buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
        tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
        mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
        return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
        tracing_disabled = 1;
        ftrace_stop();
        tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
                }
                goto out;
        }

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

        ftrace_start();
 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        tr->stop_count = 0;
                }
                goto out;
        }

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        ftrace_stop();
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        /* If global, we need to also stop the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_stop();

        raw_spin_lock_irqsave(&tr->start_lock, flags);
        if (tr->stop_count++)
                goto out;

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strcpy(comm, saved_cmdlines[map]);
        else
                strcpy(comm, "<...>");

        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
                return;

        if (!__this_cpu_read(trace_cmdline_save))
                return;

        __this_cpu_write(trace_cmdline_save, false);

        trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL) {
                struct trace_entry *ent = ring_buffer_event_data(event);

                tracing_generic_entry_update(ent, flags, pc);
                ent->type = type;
        }

        return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_cmdline_save, true);
        ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             unsigned long flags, int pc)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          struct ftrace_event_file *ftrace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
{
        *current_rb = ftrace_file->tr->trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc)
{
        *current_rb = global_trace.trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event)
{
        ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ftrace_event_call *call = &event_function;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                                          flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        if (!filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

1638 #ifdef CONFIG_STACKTRACE
1639
1640 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1641 struct ftrace_stack {
1642         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1643 };
1644
1645 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1646 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1647
1648 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1649                                  unsigned long flags,
1650                                  int skip, int pc, struct pt_regs *regs)
1651 {
1652         struct ftrace_event_call *call = &event_kernel_stack;
1653         struct ring_buffer_event *event;
1654         struct stack_entry *entry;
1655         struct stack_trace trace;
1656         int use_stack;
1657         int size = FTRACE_STACK_ENTRIES;
1658
1659         trace.nr_entries        = 0;
1660         trace.skip              = skip;
1661
1662         /*
1663          * Since events can happen in NMIs there's no safe way to
1664          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1665          * or NMI comes in, it will just have to use the default
1666          * FTRACE_STACK_SIZE.
1667          */
1668         preempt_disable_notrace();
1669
1670         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1671         /*
1672          * We don't need any atomic variables, just a barrier.
1673          * If an interrupt comes in, we don't care, because it would
1674          * have exited and put the counter back to what we want.
1675          * We just need a barrier to keep gcc from moving things
1676          * around.
1677          */
1678         barrier();
1679         if (use_stack == 1) {
1680                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1681                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1682
1683                 if (regs)
1684                         save_stack_trace_regs(regs, &trace);
1685                 else
1686                         save_stack_trace(&trace);
1687
1688                 if (trace.nr_entries > size)
1689                         size = trace.nr_entries;
1690         } else
1691                 /* From now on, use_stack is a boolean */
1692                 use_stack = 0;
1693
1694         size *= sizeof(unsigned long);
1695
1696         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1697                                           sizeof(*entry) + size, flags, pc);
1698         if (!event)
1699                 goto out;
1700         entry = ring_buffer_event_data(event);
1701
1702         memset(&entry->caller, 0, size);
1703
1704         if (use_stack)
1705                 memcpy(&entry->caller, trace.entries,
1706                        trace.nr_entries * sizeof(unsigned long));
1707         else {
1708                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1709                 trace.entries           = entry->caller;
1710                 if (regs)
1711                         save_stack_trace_regs(regs, &trace);
1712                 else
1713                         save_stack_trace(&trace);
1714         }
1715
1716         entry->size = trace.nr_entries;
1717
1718         if (!filter_check_discard(call, entry, buffer, event))
1719                 __buffer_unlock_commit(buffer, event);
1720
1721  out:
1722         /* Again, don't let gcc optimize things here */
1723         barrier();
1724         __this_cpu_dec(ftrace_stack_reserve);
1725         preempt_enable_notrace();
1726
1727 }
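
/*
 * Worked example of the reservation scheme above: the first stack dump
 * on a CPU sees ftrace_stack_reserve go 0 -> 1 and may use the large
 * per-CPU ftrace_stack; if an NMI arrives in the middle and also dumps
 * a stack, it sees 1 -> 2 and falls back to the small in-event buffer
 * of FTRACE_STACK_ENTRIES words.
 */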
1728
1729 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1730                              int skip, int pc, struct pt_regs *regs)
1731 {
1732         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1733                 return;
1734
1735         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1736 }
1737
1738 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1739                         int skip, int pc)
1740 {
1741         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1742                 return;
1743
1744         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1745 }
1746
1747 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1748                    int pc)
1749 {
1750         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1751 }
1752
1753 /**
1754  * trace_dump_stack - record a stack back trace in the trace buffer
1755  * @skip: Number of functions to skip (helper handlers)
1756  */
1757 void trace_dump_stack(int skip)
1758 {
1759         unsigned long flags;
1760
1761         if (tracing_disabled || tracing_selftest_running)
1762                 return;
1763
1764         local_save_flags(flags);
1765
1766         /*
1767          * Skip 3 more, which seems to get us to the caller of
1768          * this function.
1769          */
1770         skip += 3;
1771         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1772                              flags, skip, preempt_count(), NULL);
1773 }
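
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	if (unlikely(bad_state))
 *		trace_dump_stack(0);
 *
 * This records the caller's stack in the global trace buffer instead
 * of spamming the console the way dump_stack() would.
 */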
1774
1775 static DEFINE_PER_CPU(int, user_stack_count);
1776
1777 void
1778 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1779 {
1780         struct ftrace_event_call *call = &event_user_stack;
1781         struct ring_buffer_event *event;
1782         struct userstack_entry *entry;
1783         struct stack_trace trace;
1784
1785         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1786                 return;
1787
1788         /*
1789          * NMIs cannot handle page faults, even with fixups.
1790          * Saving the user stack can (and often does) fault.
1791          */
1792         if (unlikely(in_nmi()))
1793                 return;
1794
1795         /*
1796          * Prevent recursion, since the user stack tracing may
1797          * trigger other kernel events.
1798          */
1799         preempt_disable();
1800         if (__this_cpu_read(user_stack_count))
1801                 goto out;
1802
1803         __this_cpu_inc(user_stack_count);
1804
1805         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1806                                           sizeof(*entry), flags, pc);
1807         if (!event)
1808                 goto out_drop_count;
1809         entry   = ring_buffer_event_data(event);
1810
1811         entry->tgid             = current->tgid;
1812         memset(&entry->caller, 0, sizeof(entry->caller));
1813
1814         trace.nr_entries        = 0;
1815         trace.max_entries       = FTRACE_STACK_ENTRIES;
1816         trace.skip              = 0;
1817         trace.entries           = entry->caller;
1818
1819         save_stack_trace_user(&trace);
1820         if (!filter_check_discard(call, entry, buffer, event))
1821                 __buffer_unlock_commit(buffer, event);
1822
1823  out_drop_count:
1824         __this_cpu_dec(user_stack_count);
1825  out:
1826         preempt_enable();
1827 }
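
/*
 * User stack tracing is off by default. From user space it is enabled
 * with the "userstacktrace" option, e.g.:
 *
 *	# echo userstacktrace > /sys/kernel/debug/tracing/trace_options
 */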
1828
1829 #ifdef UNUSED
1830 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1831 {
1832         ftrace_trace_userstack(tr, flags, preempt_count());
1833 }
1834 #endif /* UNUSED */
1835
1836 #endif /* CONFIG_STACKTRACE */
1837
1838 /* created for use with alloc_percpu */
1839 struct trace_buffer_struct {
1840         char buffer[TRACE_BUF_SIZE];
1841 };
1842
1843 static struct trace_buffer_struct *trace_percpu_buffer;
1844 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1845 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1846 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1847
1848 /*
1849  * The buffer used depends on the context. There is a per-CPU
1850  * buffer for normal context, softirq context, hard irq context and
1851  * NMI context. This allows for lockless recording.
1852  *
1853  * Note, if the buffers failed to be allocated, then this returns NULL.
1854  */
1855 static char *get_trace_buf(void)
1856 {
1857         struct trace_buffer_struct *percpu_buffer;
1858
1859         /*
1860          * If we have allocated per cpu buffers, then we do not
1861          * need to do any locking.
1862          */
1863         if (in_nmi())
1864                 percpu_buffer = trace_percpu_nmi_buffer;
1865         else if (in_irq())
1866                 percpu_buffer = trace_percpu_irq_buffer;
1867         else if (in_softirq())
1868                 percpu_buffer = trace_percpu_sirq_buffer;
1869         else
1870                 percpu_buffer = trace_percpu_buffer;
1871
1872         if (!percpu_buffer)
1873                 return NULL;
1874
1875         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1876 }
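
/*
 * Note for callers (see trace_vbprintk() below for the real pattern):
 * preemption must remain disabled for as long as the returned per-CPU
 * buffer is in use, otherwise a migration could hand this CPU's buffer
 * to a writer running on another CPU.
 */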
1877
1878 static int alloc_percpu_trace_buffer(void)
1879 {
1880         struct trace_buffer_struct *buffers;
1881         struct trace_buffer_struct *sirq_buffers;
1882         struct trace_buffer_struct *irq_buffers;
1883         struct trace_buffer_struct *nmi_buffers;
1884
1885         buffers = alloc_percpu(struct trace_buffer_struct);
1886         if (!buffers)
1887                 goto err_warn;
1888
1889         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1890         if (!sirq_buffers)
1891                 goto err_sirq;
1892
1893         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1894         if (!irq_buffers)
1895                 goto err_irq;
1896
1897         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1898         if (!nmi_buffers)
1899                 goto err_nmi;
1900
1901         trace_percpu_buffer = buffers;
1902         trace_percpu_sirq_buffer = sirq_buffers;
1903         trace_percpu_irq_buffer = irq_buffers;
1904         trace_percpu_nmi_buffer = nmi_buffers;
1905
1906         return 0;
1907
1908  err_nmi:
1909         free_percpu(irq_buffers);
1910  err_irq:
1911         free_percpu(sirq_buffers);
1912  err_sirq:
1913         free_percpu(buffers);
1914  err_warn:
1915         WARN(1, "Could not allocate percpu trace_printk buffer");
1916         return -ENOMEM;
1917 }
1918
1919 static int buffers_allocated;
1920
1921 void trace_printk_init_buffers(void)
1922 {
1923         if (buffers_allocated)
1924                 return;
1925
1926         if (alloc_percpu_trace_buffer())
1927                 return;
1928
1929         pr_info("ftrace: Allocated trace_printk buffers\n");
1930
1931         /* Expand the buffers to set size */
1932         tracing_update_buffers();
1933
1934         buffers_allocated = 1;
1935
1936         /*
1937          * trace_printk_init_buffers() can be called by modules.
1938          * If that happens, then we need to start cmdline recording
1939          * directly here. If global_trace.trace_buffer.buffer is
1940          * already allocated, then this was called by module code.
1941          */
1942         if (global_trace.trace_buffer.buffer)
1943                 tracing_start_cmdline_record();
1944 }
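
/*
 * These buffers back trace_printk(). A sketch of the kind of call a
 * developer sprinkles into code under test:
 *
 *	trace_printk("saw irq %d after %lu us\n", irq, delta);
 *
 * With a constant format like this the call is routed down to
 * trace_vbprintk() below, which stores the arguments in binary form
 * plus a pointer to the format string.
 */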
1945
1946 void trace_printk_start_comm(void)
1947 {
1948         /* Start tracing comms if trace printk is set */
1949         if (!buffers_allocated)
1950                 return;
1951         tracing_start_cmdline_record();
1952 }
1953
1954 static void trace_printk_start_stop_comm(int enabled)
1955 {
1956         if (!buffers_allocated)
1957                 return;
1958
1959         if (enabled)
1960                 tracing_start_cmdline_record();
1961         else
1962                 tracing_stop_cmdline_record();
1963 }
1964
1965 /**
1966  * trace_vbprintk - write binary msg to tracing buffer
1967  * @ip: The address from where the trace was issued
 * @fmt: The printf format of the message; only the pointer to it is stored
 * @args: The va_list arguments for @fmt
1968  */
1969 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1970 {
1971         struct ftrace_event_call *call = &event_bprint;
1972         struct ring_buffer_event *event;
1973         struct ring_buffer *buffer;
1974         struct trace_array *tr = &global_trace;
1975         struct bprint_entry *entry;
1976         unsigned long flags;
1977         char *tbuffer;
1978         int len = 0, size, pc;
1979
1980         if (unlikely(tracing_selftest_running || tracing_disabled))
1981                 return 0;
1982
1983         /* Don't pollute graph traces with trace_vprintk internals */
1984         pause_graph_tracing();
1985
1986         pc = preempt_count();
1987         preempt_disable_notrace();
1988
1989         tbuffer = get_trace_buf();
1990         if (!tbuffer) {
1991                 len = 0;
1992                 goto out;
1993         }
1994
1995         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1996
1997         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1998                 goto out;
1999
2000         local_save_flags(flags);
2001         size = sizeof(*entry) + sizeof(u32) * len;
2002         buffer = tr->trace_buffer.buffer;
2003         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2004                                           flags, pc);
2005         if (!event)
2006                 goto out;
2007         entry = ring_buffer_event_data(event);
2008         entry->ip                       = ip;
2009         entry->fmt                      = fmt;
2010
2011         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2012         if (!filter_check_discard(call, entry, buffer, event)) {
2013                 __buffer_unlock_commit(buffer, event);
2014                 ftrace_trace_stack(buffer, flags, 6, pc);
2015         }
2016
2017 out:
2018         preempt_enable_notrace();
2019         unpause_graph_tracing();
2020
2021         return len;
2022 }
2023 EXPORT_SYMBOL_GPL(trace_vbprintk);
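
/*
 * Concrete example of the size arithmetic above: vbin_printf() packing
 * three 32-bit arguments returns len == 3, so the reserved event is
 * sizeof(struct bprint_entry) + 3 * sizeof(u32) bytes. The format
 * string itself is not copied; only the entry->fmt pointer is stored.
 */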
2024
2025 static int
2026 __trace_array_vprintk(struct ring_buffer *buffer,
2027                       unsigned long ip, const char *fmt, va_list args)
2028 {
2029         struct ftrace_event_call *call = &event_print;
2030         struct ring_buffer_event *event;
2031         int len = 0, size, pc;
2032         struct print_entry *entry;
2033         unsigned long flags;
2034         char *tbuffer;
2035
2036         if (tracing_disabled || tracing_selftest_running)
2037                 return 0;
2038
2039         /* Don't pollute graph traces with trace_vprintk internals */
2040         pause_graph_tracing();
2041
2042         pc = preempt_count();
2043         preempt_disable_notrace();
2044
2046         tbuffer = get_trace_buf();
2047         if (!tbuffer) {
2048                 len = 0;
2049                 goto out;
2050         }
2051
2052         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2053         if (len > TRACE_BUF_SIZE)
2054                 goto out;
2055
2056         local_save_flags(flags);
2057         size = sizeof(*entry) + len + 1;
2058         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2059                                           flags, pc);
2060         if (!event)
2061                 goto out;
2062         entry = ring_buffer_event_data(event);
2063         entry->ip = ip;
2064
2065         memcpy(&entry->buf, tbuffer, len);
2066         entry->buf[len] = '\0';
2067         if (!filter_check_discard(call, entry, buffer, event)) {
2068                 __buffer_unlock_commit(buffer, event);
2069                 ftrace_trace_stack(buffer, flags, 6, pc);
2070         }
2071  out:
2072         preempt_enable_notrace();
2073         unpause_graph_tracing();
2074
2075         return len;
2076 }
2077
2078 int trace_array_vprintk(struct trace_array *tr,
2079                         unsigned long ip, const char *fmt, va_list args)
2080 {
2081         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2082 }
2083
2084 int trace_array_printk(struct trace_array *tr,
2085                        unsigned long ip, const char *fmt, ...)
2086 {
2087         int ret;
2088         va_list ap;
2089
2090         if (!(trace_flags & TRACE_ITER_PRINTK))
2091                 return 0;
2092
2093         va_start(ap, fmt);
2094         ret = trace_array_vprintk(tr, ip, fmt, ap);
2095         va_end(ap);
2096         return ret;
2097 }
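
/*
 * Usage sketch for an instance-aware caller (hypothetical):
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset on cpu %d\n", cpu);
 *
 * where _THIS_IP_ supplies the instruction pointer that is recorded
 * with the entry.
 */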
2098
2099 int trace_array_printk_buf(struct ring_buffer *buffer,
2100                            unsigned long ip, const char *fmt, ...)
2101 {
2102         int ret;
2103         va_list ap;
2104
2105         if (!(trace_flags & TRACE_ITER_PRINTK))
2106                 return 0;
2107
2108         va_start(ap, fmt);
2109         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2110         va_end(ap);
2111         return ret;
2112 }
2113
2114 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2115 {
2116         return trace_array_vprintk(&global_trace, ip, fmt, args);
2117 }
2118 EXPORT_SYMBOL_GPL(trace_vprintk);
2119
2120 static void trace_iterator_increment(struct trace_iterator *iter)
2121 {
2122         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2123
2124         iter->idx++;
2125         if (buf_iter)
2126                 ring_buffer_read(buf_iter, NULL);
2127 }
2128
2129 static struct trace_entry *
2130 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2131                 unsigned long *lost_events)
2132 {
2133         struct ring_buffer_event *event;
2134         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2135
2136         if (buf_iter)
2137                 event = ring_buffer_iter_peek(buf_iter, ts);
2138         else
2139                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2140                                          lost_events);
2141
2142         if (event) {
2143                 iter->ent_size = ring_buffer_event_length(event);
2144                 return ring_buffer_event_data(event);
2145         }
2146         iter->ent_size = 0;
2147         return NULL;
2148 }
2149
2150 static struct trace_entry *
2151 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2152                   unsigned long *missing_events, u64 *ent_ts)
2153 {
2154         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2155         struct trace_entry *ent, *next = NULL;
2156         unsigned long lost_events = 0, next_lost = 0;
2157         int cpu_file = iter->cpu_file;
2158         u64 next_ts = 0, ts;
2159         int next_cpu = -1;
2160         int next_size = 0;
2161         int cpu;
2162
2163         /*
2164          * If we are in a per_cpu trace file, don't bother iterating over
2165          * all CPUs; peek at that CPU directly.
2166          */
2167         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2168                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2169                         return NULL;
2170                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2171                 if (ent_cpu)
2172                         *ent_cpu = cpu_file;
2173
2174                 return ent;
2175         }
2176
2177         for_each_tracing_cpu(cpu) {
2178
2179                 if (ring_buffer_empty_cpu(buffer, cpu))
2180                         continue;
2181
2182                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2183
2184                 /*
2185                  * Pick the entry with the smallest timestamp:
2186                  */
2187                 if (ent && (!next || ts < next_ts)) {
2188                         next = ent;
2189                         next_cpu = cpu;
2190                         next_ts = ts;
2191                         next_lost = lost_events;
2192                         next_size = iter->ent_size;
2193                 }
2194         }
2195
2196         iter->ent_size = next_size;
2197
2198         if (ent_cpu)
2199                 *ent_cpu = next_cpu;
2200
2201         if (ent_ts)
2202                 *ent_ts = next_ts;
2203
2204         if (missing_events)
2205                 *missing_events = next_lost;
2206
2207         return next;
2208 }
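
/*
 * Example of the merge above: with head entries CPU0 @ ts=1105 and
 * CPU1 @ ts=1042 (CPU2 empty), the CPU1 entry is returned with
 * *ent_cpu == 1. Repeated calls therefore interleave the per-CPU
 * buffers into a single stream ordered by timestamp.
 */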
2209
2210 /* Find the next real entry, without updating the iterator itself */
2211 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2212                                           int *ent_cpu, u64 *ent_ts)
2213 {
2214         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2215 }
2216
2217 /* Find the next real entry, and increment the iterator to the next entry */
2218 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2219 {
2220         iter->ent = __find_next_entry(iter, &iter->cpu,
2221                                       &iter->lost_events, &iter->ts);
2222
2223         if (iter->ent)
2224                 trace_iterator_increment(iter);
2225
2226         return iter->ent ? iter : NULL;
2227 }
2228
2229 static void trace_consume(struct trace_iterator *iter)
2230 {
2231         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2232                             &iter->lost_events);
2233 }
2234
2235 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2236 {
2237         struct trace_iterator *iter = m->private;
2238         int i = (int)*pos;
2239         void *ent;
2240
2241         WARN_ON_ONCE(iter->leftover);
2242
2243         (*pos)++;
2244
2245         /* can't go backwards */
2246         if (iter->idx > i)
2247                 return NULL;
2248
2249         if (iter->idx < 0)
2250                 ent = trace_find_next_entry_inc(iter);
2251         else
2252                 ent = iter;
2253
2254         while (ent && iter->idx < i)
2255                 ent = trace_find_next_entry_inc(iter);
2256
2257         iter->pos = *pos;
2258
2259         return ent;
2260 }
2261
2262 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2263 {
2264         struct ring_buffer_event *event;
2265         struct ring_buffer_iter *buf_iter;
2266         unsigned long entries = 0;
2267         u64 ts;
2268
2269         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2270
2271         buf_iter = trace_buffer_iter(iter, cpu);
2272         if (!buf_iter)
2273                 return;
2274
2275         ring_buffer_iter_reset(buf_iter);
2276
2277         /*
2278          * With the max latency tracers, it is possible that a reset
2279          * never took place on a CPU. This shows up as a timestamp
2280          * earlier than the start of the buffer.
2281          */
2282         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2283                 if (ts >= iter->trace_buffer->time_start)
2284                         break;
2285                 entries++;
2286                 ring_buffer_read(buf_iter, NULL);
2287         }
2288
2289         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2290 }
2291
2292 /*
2293  * The current tracer is copied to avoid holding a global lock
2294  * all around.
2295  */
2296 static void *s_start(struct seq_file *m, loff_t *pos)
2297 {
2298         struct trace_iterator *iter = m->private;
2299         struct trace_array *tr = iter->tr;
2300         int cpu_file = iter->cpu_file;
2301         void *p = NULL;
2302         loff_t l = 0;
2303         int cpu;
2304
2305         /*
2306          * Copy the tracer to avoid using a global lock all around.
2307          * iter->trace is a copy of current_trace; the pointer to the
2308          * name may be used instead of a strcmp(), as iter->trace->name
2309          * will point to the same string as current_trace->name.
2310          */
2311         mutex_lock(&trace_types_lock);
2312         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2313                 *iter->trace = *tr->current_trace;
2314         mutex_unlock(&trace_types_lock);
2315
2316 #ifdef CONFIG_TRACER_MAX_TRACE
2317         if (iter->snapshot && iter->trace->use_max_tr)
2318                 return ERR_PTR(-EBUSY);
2319 #endif
2320
2321         if (!iter->snapshot)
2322                 atomic_inc(&trace_record_cmdline_disabled);
2323
2324         if (*pos != iter->pos) {
2325                 iter->ent = NULL;
2326                 iter->cpu = 0;
2327                 iter->idx = -1;
2328
2329                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2330                         for_each_tracing_cpu(cpu)
2331                                 tracing_iter_reset(iter, cpu);
2332                 } else
2333                         tracing_iter_reset(iter, cpu_file);
2334
2335                 iter->leftover = 0;
2336                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2337                         ;
2338
2339         } else {
2340                 /*
2341                  * If we overflowed the seq_file before, then we want
2342                  * to just reuse the trace_seq buffer again.
2343                  */
2344                 if (iter->leftover)
2345                         p = iter;
2346                 else {
2347                         l = *pos - 1;
2348                         p = s_next(m, p, &l);
2349                 }
2350         }
2351
2352         trace_event_read_lock();
2353         trace_access_lock(cpu_file);
2354         return p;
2355 }
2356
2357 static void s_stop(struct seq_file *m, void *p)
2358 {
2359         struct trace_iterator *iter = m->private;
2360
2361 #ifdef CONFIG_TRACER_MAX_TRACE
2362         if (iter->snapshot && iter->trace->use_max_tr)
2363                 return;
2364 #endif
2365
2366         if (!iter->snapshot)
2367                 atomic_dec(&trace_record_cmdline_disabled);
2368
2369         trace_access_unlock(iter->cpu_file);
2370         trace_event_read_unlock();
2371 }
2372
2373 static void
2374 get_total_entries(struct trace_buffer *buf,
2375                   unsigned long *total, unsigned long *entries)
2376 {
2377         unsigned long count;
2378         int cpu;
2379
2380         *total = 0;
2381         *entries = 0;
2382
2383         for_each_tracing_cpu(cpu) {
2384                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2385                 /*
2386                  * If this buffer has skipped entries, then we hold all
2387                  * entries for the trace and we need to ignore the
2388                  * ones before the time stamp.
2389                  */
2390                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2391                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2392                         /* total is the same as the entries */
2393                         *total += count;
2394                 } else
2395                         *total += count +
2396                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2397                 *entries += count;
2398         }
2399 }
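
/*
 * Example: a CPU wrote 150 events into a buffer that could only hold
 * 100, so ring_buffer_entries_cpu() == 100 and
 * ring_buffer_overrun_cpu() == 50. This function then reports
 * entries == 100 (still readable) and total == 150 (ever written),
 * which is what the "entries-in-buffer/entries-written" header below
 * prints.
 */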
2400
2401 static void print_lat_help_header(struct seq_file *m)
2402 {
2403         seq_puts(m, "#                  _------=> CPU#            \n");
2404         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2405         seq_puts(m, "#                | / _----=> need-resched    \n");
2406         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2407         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2408         seq_puts(m, "#                |||| /     delay             \n");
2409         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2410         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2411 }
2412
2413 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2414 {
2415         unsigned long total;
2416         unsigned long entries;
2417
2418         get_total_entries(buf, &total, &entries);
2419         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2420                    entries, total, num_online_cpus());
2421         seq_puts(m, "#\n");
2422 }
2423
2424 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2425 {
2426         print_event_info(buf, m);
2427         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2428         seq_puts(m, "#              | |       |          |         |\n");
2429 }
2430
2431 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2432 {
2433         print_event_info(buf, m);
2434         seq_puts(m, "#                              _-----=> irqs-off\n");
2435         seq_puts(m, "#                             / _----=> need-resched\n");
2436         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2437         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2438         seq_puts(m, "#                            ||| /     delay\n");
2439         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2440         seq_puts(m, "#              | |       |   ||||       |         |\n");
2441 }
2442
2443 void
2444 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2445 {
2446         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2447         struct trace_buffer *buf = iter->trace_buffer;
2448         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2449         struct tracer *type = iter->trace;
2450         unsigned long entries;
2451         unsigned long total;
2452         const char *name = type->name;
2455
2456         get_total_entries(buf, &total, &entries);
2457
2458         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2459                    name, UTS_RELEASE);
2460         seq_puts(m, "# -----------------------------------"
2461                  "---------------------------------\n");
2462         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2463                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2464                    nsecs_to_usecs(data->saved_latency),
2465                    entries,
2466                    total,
2467                    buf->cpu,
2468 #if defined(CONFIG_PREEMPT_NONE)
2469                    "server",
2470 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2471                    "desktop",
2472 #elif defined(CONFIG_PREEMPT)
2473                    "preempt",
2474 #else
2475                    "unknown",
2476 #endif
2477                    /* These are reserved for later use */
2478                    0, 0, 0, 0);
2479 #ifdef CONFIG_SMP
2480         seq_printf(m, " #P:%d)\n", num_online_cpus());
2481 #else
2482         seq_puts(m, ")\n");
2483 #endif
2484         seq_puts(m, "#    -----------------\n");
2485         seq_printf(m, "#    | task: %.16s-%d "
2486                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2487                    data->comm, data->pid,
2488                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2489                    data->policy, data->rt_priority);
2490         seq_puts(m, "#    -----------------\n");
2491
2492         if (data->critical_start) {
2493                 seq_puts(m, "#  => started at: ");
2494                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2495                 trace_print_seq(m, &iter->seq);
2496                 seq_puts(m, "\n#  => ended at:   ");
2497                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2498                 trace_print_seq(m, &iter->seq);
2499                 seq_puts(m, "\n#\n");
2500         }
2501
2502         seq_puts(m, "#\n");
2503 }
2504
2505 static void test_cpu_buff_start(struct trace_iterator *iter)
2506 {
2507         struct trace_seq *s = &iter->seq;
2508
2509         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2510                 return;
2511
2512         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2513                 return;
2514
2515         if (cpumask_test_cpu(iter->cpu, iter->started))
2516                 return;
2517
2518         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2519                 return;
2520
2521         cpumask_set_cpu(iter->cpu, iter->started);
2522
2523         /* Don't print the "buffer started" banner for the first entry of the trace */
2524         if (iter->idx > 1)
2525                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2526                                 iter->cpu);
2527 }
2528
2529 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2530 {
2531         struct trace_seq *s = &iter->seq;
2532         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2533         struct trace_entry *entry;
2534         struct trace_event *event;
2535
2536         entry = iter->ent;
2537
2538         test_cpu_buff_start(iter);
2539
2540         event = ftrace_find_event(entry->type);
2541
2542         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2543                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2544                         if (!trace_print_lat_context(iter))
2545                                 goto partial;
2546                 } else {
2547                         if (!trace_print_context(iter))
2548                                 goto partial;
2549                 }
2550         }
2551
2552         if (event)
2553                 return event->funcs->trace(iter, sym_flags, event);
2554
2555         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2556                 goto partial;
2557
2558         return TRACE_TYPE_HANDLED;
2559 partial:
2560         return TRACE_TYPE_PARTIAL_LINE;
2561 }
2562
2563 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2564 {
2565         struct trace_seq *s = &iter->seq;
2566         struct trace_entry *entry;
2567         struct trace_event *event;
2568
2569         entry = iter->ent;
2570
2571         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2572                 if (!trace_seq_printf(s, "%d %d %llu ",
2573                                       entry->pid, iter->cpu, iter->ts))
2574                         goto partial;
2575         }
2576
2577         event = ftrace_find_event(entry->type);
2578         if (event)
2579                 return event->funcs->raw(iter, 0, event);
2580
2581         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2582                 goto partial;
2583
2584         return TRACE_TYPE_HANDLED;
2585 partial:
2586         return TRACE_TYPE_PARTIAL_LINE;
2587 }
2588
2589 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2590 {
2591         struct trace_seq *s = &iter->seq;
2592         unsigned char newline = '\n';
2593         struct trace_entry *entry;
2594         struct trace_event *event;
2595
2596         entry = iter->ent;
2597
2598         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2599                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2600                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2601                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2602         }
2603
2604         event = ftrace_find_event(entry->type);
2605         if (event) {
2606                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2607                 if (ret != TRACE_TYPE_HANDLED)
2608                         return ret;
2609         }
2610
2611         SEQ_PUT_FIELD_RET(s, newline);
2612
2613         return TRACE_TYPE_HANDLED;
2614 }
2615
2616 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2617 {
2618         struct trace_seq *s = &iter->seq;
2619         struct trace_entry *entry;
2620         struct trace_event *event;
2621
2622         entry = iter->ent;
2623
2624         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2625                 SEQ_PUT_FIELD_RET(s, entry->pid);
2626                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2627                 SEQ_PUT_FIELD_RET(s, iter->ts);
2628         }
2629
2630         event = ftrace_find_event(entry->type);
2631         return event ? event->funcs->binary(iter, 0, event) :
2632                 TRACE_TYPE_HANDLED;
2633 }
2634
2635 int trace_empty(struct trace_iterator *iter)
2636 {
2637         struct ring_buffer_iter *buf_iter;
2638         int cpu;
2639
2640         /* If we are looking at one CPU buffer, only check that one */
2641         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2642                 cpu = iter->cpu_file;
2643                 buf_iter = trace_buffer_iter(iter, cpu);
2644                 if (buf_iter) {
2645                         if (!ring_buffer_iter_empty(buf_iter))
2646                                 return 0;
2647                 } else {
2648                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2649                                 return 0;
2650                 }
2651                 return 1;
2652         }
2653
2654         for_each_tracing_cpu(cpu) {
2655                 buf_iter = trace_buffer_iter(iter, cpu);
2656                 if (buf_iter) {
2657                         if (!ring_buffer_iter_empty(buf_iter))
2658                                 return 0;
2659                 } else {
2660                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2661                                 return 0;
2662                 }
2663         }
2664
2665         return 1;
2666 }
2667
2668 /*  Called with trace_event_read_lock() held. */
2669 enum print_line_t print_trace_line(struct trace_iterator *iter)
2670 {
2671         enum print_line_t ret;
2672
2673         if (iter->lost_events &&
2674             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2675                                  iter->cpu, iter->lost_events))
2676                 return TRACE_TYPE_PARTIAL_LINE;
2677
2678         if (iter->trace && iter->trace->print_line) {
2679                 ret = iter->trace->print_line(iter);
2680                 if (ret != TRACE_TYPE_UNHANDLED)
2681                         return ret;
2682         }
2683
2684         if (iter->ent->type == TRACE_BPUTS &&
2685                         trace_flags & TRACE_ITER_PRINTK &&
2686                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2687                 return trace_print_bputs_msg_only(iter);
2688
2689         if (iter->ent->type == TRACE_BPRINT &&
2690                         trace_flags & TRACE_ITER_PRINTK &&
2691                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2692                 return trace_print_bprintk_msg_only(iter);
2693
2694         if (iter->ent->type == TRACE_PRINT &&
2695                         trace_flags & TRACE_ITER_PRINTK &&
2696                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2697                 return trace_print_printk_msg_only(iter);
2698
2699         if (trace_flags & TRACE_ITER_BIN)
2700                 return print_bin_fmt(iter);
2701
2702         if (trace_flags & TRACE_ITER_HEX)
2703                 return print_hex_fmt(iter);
2704
2705         if (trace_flags & TRACE_ITER_RAW)
2706                 return print_raw_fmt(iter);
2707
2708         return print_trace_fmt(iter);
2709 }
2710
2711 void trace_latency_header(struct seq_file *m)
2712 {
2713         struct trace_iterator *iter = m->private;
2714
2715         /* print nothing if the buffers are empty */
2716         if (trace_empty(iter))
2717                 return;
2718
2719         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2720                 print_trace_header(m, iter);
2721
2722         if (!(trace_flags & TRACE_ITER_VERBOSE))
2723                 print_lat_help_header(m);
2724 }
2725
2726 void trace_default_header(struct seq_file *m)
2727 {
2728         struct trace_iterator *iter = m->private;
2729
2730         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2731                 return;
2732
2733         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2734                 /* print nothing if the buffers are empty */
2735                 if (trace_empty(iter))
2736                         return;
2737                 print_trace_header(m, iter);
2738                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2739                         print_lat_help_header(m);
2740         } else {
2741                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2742                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2743                                 print_func_help_header_irq(iter->trace_buffer, m);
2744                         else
2745                                 print_func_help_header(iter->trace_buffer, m);
2746                 }
2747         }
2748 }
2749
2750 static void test_ftrace_alive(struct seq_file *m)
2751 {
2752         if (!ftrace_is_dead())
2753                 return;
2754         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2755         seq_puts(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2756 }
2757
2758 #ifdef CONFIG_TRACER_MAX_TRACE
2759 static void show_snapshot_main_help(struct seq_file *m)
2760 {
2761         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2762         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2763         seq_puts(m, "#                      Takes a snapshot of the main buffer.\n");
2764         seq_puts(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2765         seq_puts(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2766         seq_puts(m, "#                       is not a '0' or '1')\n");
2767 }
2768
2769 static void show_snapshot_percpu_help(struct seq_file *m)
2770 {
2771         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2772 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2773         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2774         seq_puts(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2775 #else
2776         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2777         seq_puts(m, "#                     Must use main snapshot file to allocate.\n");
2778 #endif
2779         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2780         seq_puts(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2781         seq_puts(m, "#                       is not a '0' or '1')\n");
2782 }
2783
2784 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2785 {
2786         if (iter->tr->allocated_snapshot)
2787                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2788         else
2789                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2790
2791         seq_puts(m, "# Snapshot commands:\n");
2792         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2793                 show_snapshot_main_help(m);
2794         else
2795                 show_snapshot_percpu_help(m);
2796 }
2797 #else
2798 /* Should never be called */
2799 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2800 #endif
2801
2802 static int s_show(struct seq_file *m, void *v)
2803 {
2804         struct trace_iterator *iter = v;
2805         int ret;
2806
2807         if (iter->ent == NULL) {
2808                 if (iter->tr) {
2809                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2810                         seq_puts(m, "#\n");
2811                         test_ftrace_alive(m);
2812                 }
2813                 if (iter->snapshot && trace_empty(iter))
2814                         print_snapshot_help(m, iter);
2815                 else if (iter->trace && iter->trace->print_header)
2816                         iter->trace->print_header(m);
2817                 else
2818                         trace_default_header(m);
2819
2820         } else if (iter->leftover) {
2821                 /*
2822                  * If we filled the seq_file buffer earlier, we
2823                  * want to just show it now.
2824                  */
2825                 ret = trace_print_seq(m, &iter->seq);
2826
2827                 /* ret should this time be zero, but you never know */
2828                 iter->leftover = ret;
2829
2830         } else {
2831                 print_trace_line(iter);
2832                 ret = trace_print_seq(m, &iter->seq);
2833                 /*
2834                  * If we overflow the seq_file buffer, then it will
2835                  * ask us for this data again at start up.
2836                  * Use that instead.
2837                  *  ret is 0 if seq_file write succeeded.
2838                  *        -1 otherwise.
2839                  */
2840                 iter->leftover = ret;
2841         }
2842
2843         return 0;
2844 }
2845
2846 static const struct seq_operations tracer_seq_ops = {
2847         .start          = s_start,
2848         .next           = s_next,
2849         .stop           = s_stop,
2850         .show           = s_show,
2851 };
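
/*
 * Roughly how the seq_file core drives the ops above on each read(2)
 * (pseudo-code, simplified from fs/seq_file.c):
 *
 *	p = s_start(m, &pos);
 *	while (p && output buffer not full) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * which is why s_start()/s_stop() take and release the event and
 * access locks.
 */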
2852
2853 static struct trace_iterator *
2854 __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2855                struct inode *inode, struct file *file, bool snapshot)
2856 {
2857         struct trace_iterator *iter;
2858         int cpu;
2859
2860         if (tracing_disabled)
2861                 return ERR_PTR(-ENODEV);
2862
2863         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2864         if (!iter)
2865                 return ERR_PTR(-ENOMEM);
2866
2867         iter->buffer_iter = kcalloc(num_possible_cpus(), sizeof(*iter->buffer_iter),
2868                                     GFP_KERNEL);
2869         if (!iter->buffer_iter)
2870                 goto release;
2871
2872         /*
2873          * We make a copy of the current tracer to avoid concurrent
2874          * changes to it while we are reading.
2875          */
2876         mutex_lock(&trace_types_lock);
2877         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2878         if (!iter->trace)
2879                 goto fail;
2880
2881         *iter->trace = *tr->current_trace;
2882
2883         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2884                 goto fail;
2885
2886         iter->tr = tr;
2887
2888 #ifdef CONFIG_TRACER_MAX_TRACE
2889         /* Currently only the top directory has a snapshot */
2890         if (tr->current_trace->print_max || snapshot)
2891                 iter->trace_buffer = &tr->max_buffer;
2892         else
2893 #endif
2894                 iter->trace_buffer = &tr->trace_buffer;
2895         iter->snapshot = snapshot;
2896         iter->pos = -1;
2897         mutex_init(&iter->mutex);
2898         iter->cpu_file = tc->cpu;
2899
2900         /* Notify the tracer early, before we stop tracing. */
2901         if (iter->trace && iter->trace->open)
2902                 iter->trace->open(iter);
2903
2904         /* Annotate start of buffers if we had overruns */
2905         if (ring_buffer_overruns(iter->trace_buffer->buffer))
2906                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2907
2908         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2909         if (trace_clocks[tr->clock_id].in_ns)
2910                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2911
2912         /* stop the trace while dumping if we are not opening "snapshot" */
2913         if (!iter->snapshot)
2914                 tracing_stop_tr(tr);
2915
2916         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2917                 for_each_tracing_cpu(cpu) {
2918                         iter->buffer_iter[cpu] =
2919                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2920                 }
2921                 ring_buffer_read_prepare_sync();
2922                 for_each_tracing_cpu(cpu) {
2923                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2924                         tracing_iter_reset(iter, cpu);
2925                 }
2926         } else {
2927                 cpu = iter->cpu_file;
2928                 iter->buffer_iter[cpu] =
2929                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2930                 ring_buffer_read_prepare_sync();
2931                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2932                 tracing_iter_reset(iter, cpu);
2933         }
2934
2935         mutex_unlock(&trace_types_lock);
2936
2937         return iter;
2938
2939  fail:
2940         mutex_unlock(&trace_types_lock);
2941         kfree(iter->trace);
2942         kfree(iter->buffer_iter);
2943 release:
2944         seq_release_private(inode, file);
2945         return ERR_PTR(-ENOMEM);
2946 }
2947
2948 int tracing_open_generic(struct inode *inode, struct file *filp)
2949 {
2950         if (tracing_disabled)
2951                 return -ENODEV;
2952
2953         filp->private_data = inode->i_private;
2954         return 0;
2955 }
2956
2957 /*
2958  * Open and update trace_array ref count.
2959  * Must have the current trace_array passed to it.
2960  */
2961 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2962 {
2963         struct trace_array *tr = inode->i_private;
2964
2965         if (tracing_disabled)
2966                 return -ENODEV;
2967
2968         if (trace_array_get(tr) < 0)
2969                 return -ENODEV;
2970
2971         filp->private_data = inode->i_private;
2972
2973         return 0;
2975 }
2976
2977 static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2978 {
2979         struct trace_cpu *tc = inode->i_private;
2980         struct trace_array *tr = tc->tr;
2981
2982         if (tracing_disabled)
2983                 return -ENODEV;
2984
2985         if (trace_array_get(tr) < 0)
2986                 return -ENODEV;
2987
2988         filp->private_data = inode->i_private;
2989
2990         return 0;
2992 }
2993
2994 static int tracing_release(struct inode *inode, struct file *file)
2995 {
2996         struct seq_file *m = file->private_data;
2997         struct trace_iterator *iter;
2998         struct trace_array *tr;
2999         int cpu;
3000
3001         /* Writes do not use seq_file, need to grab tr from inode */
3002         if (!(file->f_mode & FMODE_READ)) {
3003                 struct trace_cpu *tc = inode->i_private;
3004
3005                 trace_array_put(tc->tr);
3006                 return 0;
3007         }
3008
3009         iter = m->private;
3010         tr = iter->tr;
3011
3012         mutex_lock(&trace_types_lock);
3013
3014         for_each_tracing_cpu(cpu) {
3015                 if (iter->buffer_iter[cpu])
3016                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3017         }
3018
3019         if (iter->trace && iter->trace->close)
3020                 iter->trace->close(iter);
3021
3022         if (!iter->snapshot)
3023                 /* reenable tracing if it was previously enabled */
3024                 tracing_start_tr(tr);
3025
3026         __trace_array_put(tr);
3027
3028         mutex_unlock(&trace_types_lock);
3029
3030         mutex_destroy(&iter->mutex);
3031         free_cpumask_var(iter->started);
3032         kfree(iter->trace);
3033         kfree(iter->buffer_iter);
3034         seq_release_private(inode, file);
3035
3036         return 0;
3037 }
3038
3039 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3040 {
3041         struct trace_array *tr = inode->i_private;
3042
3043         trace_array_put(tr);
3044         return 0;
3045 }
3046
3047 static int tracing_release_generic_tc(struct inode *inode, struct file *file)
3048 {
3049         struct trace_cpu *tc = inode->i_private;
3050         struct trace_array *tr = tc->tr;
3051
3052         trace_array_put(tr);
3053         return 0;
3054 }
3055
3056 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3057 {
3058         struct trace_array *tr = inode->i_private;
3059
3060         trace_array_put(tr);
3061
3062         return single_release(inode, file);
3063 }
3064
3065 static int tracing_open(struct inode *inode, struct file *file)
3066 {
3067         struct trace_cpu *tc = inode->i_private;
3068         struct trace_array *tr = tc->tr;
3069         struct trace_iterator *iter;
3070         int ret = 0;
3071
3072         if (trace_array_get(tr) < 0)
3073                 return -ENODEV;
3074
3075         /* If this file was open for write, then erase contents */
3076         if ((file->f_mode & FMODE_WRITE) &&
3077             (file->f_flags & O_TRUNC)) {
3078                 if (tc->cpu == RING_BUFFER_ALL_CPUS)
3079                         tracing_reset_online_cpus(&tr->trace_buffer);
3080                 else
3081                         tracing_reset(&tr->trace_buffer, tc->cpu);
3082         }
3083
3084         if (file->f_mode & FMODE_READ) {
3085                 iter = __tracing_open(tr, tc, inode, file, false);
3086                 if (IS_ERR(iter))
3087                         ret = PTR_ERR(iter);
3088                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3089                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3090         }
3091
3092         if (ret < 0)
3093                 trace_array_put(tr);
3094
3095         return ret;
3096 }
3097
3098 static void *
3099 t_next(struct seq_file *m, void *v, loff_t *pos)
3100 {
3101         struct tracer *t = v;
3102
3103         (*pos)++;
3104
3105         if (t)
3106                 t = t->next;
3107
3108         return t;
3109 }
3110
3111 static void *t_start(struct seq_file *m, loff_t *pos)
3112 {
3113         struct tracer *t;
3114         loff_t l = 0;
3115
3116         mutex_lock(&trace_types_lock);
3117         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
3118                 ;
3119
3120         return t;
3121 }
3122
3123 static void t_stop(struct seq_file *m, void *p)
3124 {
3125         mutex_unlock(&trace_types_lock);
3126 }
3127
3128 static int t_show(struct seq_file *m, void *v)
3129 {
3130         struct tracer *t = v;
3131
3132         if (!t)
3133                 return 0;
3134
3135         seq_printf(m, "%s", t->name);
3136         if (t->next)
3137                 seq_putc(m, ' ');
3138         else
3139                 seq_putc(m, '\n');
3140
3141         return 0;
3142 }
3143
3144 static const struct seq_operations show_traces_seq_ops = {
3145         .start          = t_start,
3146         .next           = t_next,
3147         .stop           = t_stop,
3148         .show           = t_show,
3149 };
3150
3151 static int show_traces_open(struct inode *inode, struct file *file)
3152 {
3153         if (tracing_disabled)
3154                 return -ENODEV;
3155
3156         return seq_open(file, &show_traces_seq_ops);
3157 }
3158
3159 static ssize_t
3160 tracing_write_stub(struct file *filp, const char __user *ubuf,
3161                    size_t count, loff_t *ppos)
3162 {
3163         return count;
3164 }
3165
3166 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3167 {
3168         if (file->f_mode & FMODE_READ)
3169                 return seq_lseek(file, offset, origin);
3170         else
3171                 return 0;
3172 }
3173
3174 static const struct file_operations tracing_fops = {
3175         .open           = tracing_open,
3176         .read           = seq_read,
3177         .write          = tracing_write_stub,
3178         .llseek         = tracing_seek,
3179         .release        = tracing_release,
3180 };
3181
3182 static const struct file_operations show_traces_fops = {
3183         .open           = show_traces_open,
3184         .read           = seq_read,
3185         .release        = seq_release,
3186         .llseek         = seq_lseek,
3187 };
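
/*
 * show_traces_fops backs the "available_tracers" file; reading it
 * lists the registered tracers space-separated, e.g.:
 *
 *	# cat /sys/kernel/debug/tracing/available_tracers
 *	function_graph function nop
 */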
3188
3189 /*
3190  * Only trace on a CPU if the bitmask is set:
3191  */
3192 static cpumask_var_t tracing_cpumask;
3193
3194 /*
3195  * The tracer itself will not take this lock, but we still want
3196  * to provide a consistent cpumask to user-space:
3197  */
3198 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3199
3200 /*
3201  * Temporary storage for the character representation of the
3202  * CPU bitmask (and one more byte for the newline):
3203  */
3204 static char mask_str[NR_CPUS + 1];
3205
3206 static ssize_t
3207 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3208                      size_t count, loff_t *ppos)
3209 {
3210         int len;
3211
3212         mutex_lock(&tracing_cpumask_update_lock);
3213
3214         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
3215         if (count - len < 2) {
3216                 count = -EINVAL;
3217                 goto out_err;
3218         }
3219         len += sprintf(mask_str + len, "\n");
3220         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3221
3222 out_err:
3223         mutex_unlock(&tracing_cpumask_update_lock);
3224
3225         return count;
3226 }
3227
3228 static ssize_t
3229 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3230                       size_t count, loff_t *ppos)
3231 {
3232         struct trace_array *tr = filp->private_data;
3233         cpumask_var_t tracing_cpumask_new;
3234         int err, cpu;
3235
3236         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3237                 return -ENOMEM;
3238
3239         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3240         if (err)
3241                 goto err_unlock;
3242
3243         mutex_lock(&tracing_cpumask_update_lock);
3244
3245         local_irq_disable();
3246         arch_spin_lock(&ftrace_max_lock);
3247         for_each_tracing_cpu(cpu) {
3248                 /*
3249                  * Increase/decrease the disabled counter if we are
3250                  * about to flip a bit in the cpumask:
3251                  */
3252                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3253                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3254                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3255                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3256                 }
3257                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3258                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3259                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3260                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3261                 }
3262         }
3263         arch_spin_unlock(&ftrace_max_lock);
3264         local_irq_enable();
3265
3266         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
3267
3268         mutex_unlock(&tracing_cpumask_update_lock);
3269         free_cpumask_var(tracing_cpumask_new);
3270
3271         return count;
3272
3273 err_unlock:
3274         free_cpumask_var(tracing_cpumask_new);
3275
3276         return err;
3277 }
3278
3279 static const struct file_operations tracing_cpumask_fops = {
3280         .open           = tracing_open_generic,
3281         .read           = tracing_cpumask_read,
3282         .write          = tracing_cpumask_write,
3283         .llseek         = generic_file_llseek,
3284 };
3285
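/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the
 * handlers above back the "tracing_cpumask" file, so e.g.
 *
 *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * would limit tracing to CPUs 0 and 1, with tracing_cpumask_write()
 * flipping the per-cpu disabled counters for every bit that changed.
 */
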
3286 static int tracing_trace_options_show(struct seq_file *m, void *v)
3287 {
3288         struct tracer_opt *trace_opts;
3289         struct trace_array *tr = m->private;
3290         u32 tracer_flags;
3291         int i;
3292
3293         mutex_lock(&trace_types_lock);
3294         tracer_flags = tr->current_trace->flags->val;
3295         trace_opts = tr->current_trace->flags->opts;
3296
3297         for (i = 0; trace_options[i]; i++) {
3298                 if (trace_flags & (1 << i))
3299                         seq_printf(m, "%s\n", trace_options[i]);
3300                 else
3301                         seq_printf(m, "no%s\n", trace_options[i]);
3302         }
3303
3304         for (i = 0; trace_opts[i].name; i++) {
3305                 if (tracer_flags & trace_opts[i].bit)
3306                         seq_printf(m, "%s\n", trace_opts[i].name);
3307                 else
3308                         seq_printf(m, "no%s\n", trace_opts[i].name);
3309         }
3310         mutex_unlock(&trace_types_lock);
3311
3312         return 0;
3313 }
3314
3315 static int __set_tracer_option(struct tracer *trace,
3316                                struct tracer_flags *tracer_flags,
3317                                struct tracer_opt *opts, int neg)
3318 {
3319         int ret;
3320
3321         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3322         if (ret)
3323                 return ret;
3324
3325         if (neg)
3326                 tracer_flags->val &= ~opts->bit;
3327         else
3328                 tracer_flags->val |= opts->bit;
3329         return 0;
3330 }
3331
3332 /* Try to assign a tracer specific option */
3333 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3334 {
3335         struct tracer_flags *tracer_flags = trace->flags;
3336         struct tracer_opt *opts = NULL;
3337         int i;
3338
3339         for (i = 0; tracer_flags->opts[i].name; i++) {
3340                 opts = &tracer_flags->opts[i];
3341
3342                 if (strcmp(cmp, opts->name) == 0)
3343                         return __set_tracer_option(trace, trace->flags,
3344                                                    opts, neg);
3345         }
3346
3347         return -EINVAL;
3348 }
3349
3350 /* Some tracers require overwrite to stay enabled */
3351 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3352 {
3353         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3354                 return -1;
3355
3356         return 0;
3357 }
3358
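/*
 * Illustrative sketch only: a tracer that depends on overwrite mode can
 * reuse this check as its flag_changed callback, e.g. (the "foo" tracer
 * below is hypothetical):
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name		= "foo",
 *		.flag_changed	= trace_keep_overwrite,
 *	};
 *
 * set_tracer_flag() below will then refuse to clear TRACE_ITER_OVERWRITE
 * while such a tracer is enabled.
 */
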
3359 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3360 {
3361         /* do nothing if flag is already set */
3362         if (!!(trace_flags & mask) == !!enabled)
3363                 return 0;
3364
3365         /* Give the tracer a chance to approve the change */
3366         if (tr->current_trace->flag_changed)
3367                 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
3368                         return -EINVAL;
3369
3370         if (enabled)
3371                 trace_flags |= mask;
3372         else
3373                 trace_flags &= ~mask;
3374
3375         if (mask == TRACE_ITER_RECORD_CMD)
3376                 trace_event_enable_cmd_record(enabled);
3377
3378         if (mask == TRACE_ITER_OVERWRITE) {
3379                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3380 #ifdef CONFIG_TRACER_MAX_TRACE
3381                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3382 #endif
3383         }
3384
3385         if (mask == TRACE_ITER_PRINTK)
3386                 trace_printk_start_stop_comm(enabled);
3387
3388         return 0;
3389 }
3390
3391 static int trace_set_options(struct trace_array *tr, char *option)
3392 {
3393         char *cmp;
3394         int neg = 0;
3395         int ret = -ENODEV;
3396         int i;
3397
3398         cmp = strstrip(option);
3399
3400         if (strncmp(cmp, "no", 2) == 0) {
3401                 neg = 1;
3402                 cmp += 2;
3403         }
3404
3405         mutex_lock(&trace_types_lock);
3406
3407         for (i = 0; trace_options[i]; i++) {
3408                 if (strcmp(cmp, trace_options[i]) == 0) {
3409                         ret = set_tracer_flag(tr, 1 << i, !neg);
3410                         break;
3411                 }
3412         }
3413
3414         /* If no option could be set, test the specific tracer options */
3415         if (!trace_options[i])
3416                 ret = set_tracer_option(tr->current_trace, cmp, neg);
3417
3418         mutex_unlock(&trace_types_lock);
3419
3420         return ret;
3421 }
3422
3423 static ssize_t
3424 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3425                         size_t cnt, loff_t *ppos)
3426 {
3427         struct seq_file *m = filp->private_data;
3428         struct trace_array *tr = m->private;
3429         char buf[64];
3430         int ret;
3431
3432         if (cnt >= sizeof(buf))
3433                 return -EINVAL;
3434
3435         if (copy_from_user(&buf, ubuf, cnt))
3436                 return -EFAULT;
3437
3438         buf[cnt] = 0;
3439
3440         ret = trace_set_options(tr, buf);
3441         if (ret < 0)
3442                 return ret;
3443
3444         *ppos += cnt;
3445
3446         return cnt;
3447 }
3448
3449 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3450 {
3451         struct trace_array *tr = inode->i_private;
3452         int ret;
3453
3454         if (tracing_disabled)
3455                 return -ENODEV;
3456
3457         if (trace_array_get(tr) < 0)
3458                 return -ENODEV;
3459
3460         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3461         if (ret < 0)
3462                 trace_array_put(tr);
3463
3464         return ret;
3465 }
3466
3467 static const struct file_operations tracing_iter_fops = {
3468         .open           = tracing_trace_options_open,
3469         .read           = seq_read,
3470         .llseek         = seq_lseek,
3471         .release        = tracing_single_release_tr,
3472         .write          = tracing_trace_options_write,
3473 };
3474
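/*
 * Putting the pieces above together: reading "trace_options" lists every
 * flag, prefixed with "no" when it is clear, and each write toggles one
 * flag via trace_set_options() -> set_tracer_flag(), falling back to
 * set_tracer_option() for tracer specific flags. For example:
 *
 *   # cat /sys/kernel/debug/tracing/trace_options
 *   # echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 */
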
3475 static const char readme_msg[] =
3476         "tracing mini-HOWTO:\n\n"
3477         "# echo 0 > tracing_on : quick way to disable tracing\n"
3478         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3479         " Important files:\n"
3480         "  trace\t\t\t- The static contents of the buffer\n"
3481         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3482         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3483         "  current_tracer\t- function and latency tracers\n"
3484         "  available_tracers\t- list of configured tracers for current_tracer\n"
3485         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3486         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3487         "  trace_clock\t\t- change the clock used to order events\n"
3488         "       local:   Per cpu clock but may not be synced across CPUs\n"
3489         "      global:   Synced across CPUs but slows tracing down.\n"
3490         "     counter:   Not a clock, but just an increment\n"
3491         "      uptime:   Jiffy counter from time of boot\n"
3492         "        perf:   Same clock that perf events use\n"
3493 #ifdef CONFIG_X86_64
3494         "     x86-tsc:   TSC cycle counter\n"
3495 #endif
3496         "\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3497         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3498         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3499         "\t\t\t  Remove sub-buffer with rmdir\n"
3500         "  trace_options\t\t- Set format or modify how tracing happens\n"
3501         "\t\t\t  Disable an option by prefixing 'no' to the option name\n"
3502 #ifdef CONFIG_DYNAMIC_FTRACE
3503         "\n  available_filter_functions - list of functions that can be filtered on\n"
3504         "  set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3505         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3506         "            modules: Can select a group via module\n"
3507         "             Format: :mod:<module-name>\n"
3508         "             example: echo :mod:ext3 > set_ftrace_filter\n"
3509         "            triggers: a command to perform when function is hit\n"
3510         "              Format: <function>:<trigger>[:count]\n"
3511         "             trigger: traceon, traceoff\n"
3512         "                      enable_event:<system>:<event>\n"
3513         "                      disable_event:<system>:<event>\n"
3514 #ifdef CONFIG_STACKTRACE
3515         "                      stacktrace\n"
3516 #endif
3517 #ifdef CONFIG_TRACER_SNAPSHOT
3518         "                      snapshot\n"
3519 #endif
3520         "             example: echo do_fault:traceoff > set_ftrace_filter\n"
3521         "                      echo do_trap:traceoff:3 > set_ftrace_filter\n"
3522         "             The first one will disable tracing every time do_fault is hit\n"
3523         "             The second will disable tracing at most 3 times when do_trap is hit\n"
3524         "               The first time do_trap is hit and it disables tracing, the counter\n"
3525         "               will decrement to 2. If tracing is already disabled, the counter\n"
3526         "               will not decrement. It only decrements when the trigger actually fires\n"
3527         "             To remove trigger without count:\n"
3528         "               echo '!<function>:<trigger>' > set_ftrace_filter\n"
3529         "             To remove trigger with a count:\n"
3530         "               echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3531         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3532         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3533         "            modules: Can select a group via module command :mod:\n"
3534         "            Does not accept triggers\n"
3535 #endif /* CONFIG_DYNAMIC_FTRACE */
3536 #ifdef CONFIG_FUNCTION_TRACER
3537         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3538 #endif
3539 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3540         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3541         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3542 #endif
3543 #ifdef CONFIG_TRACER_SNAPSHOT
3544         "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3545         "\t\t\t  Read the contents for more information\n"
3546 #endif
3547 #ifdef CONFIG_STACK_TRACER
3548         "  stack_trace\t\t- Shows the max stack trace when active\n"
3549         "  stack_max_size\t- Shows current max stack size that was traced\n"
3550         "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
3551 #ifdef CONFIG_DYNAMIC_FTRACE
3552         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3553 #endif
3554 #endif /* CONFIG_STACK_TRACER */
3555 ;
3556
3557 static ssize_t
3558 tracing_readme_read(struct file *filp, char __user *ubuf,
3559                        size_t cnt, loff_t *ppos)
3560 {
3561         return simple_read_from_buffer(ubuf, cnt, ppos,
3562                                         readme_msg, strlen(readme_msg));
3563 }
3564
3565 static const struct file_operations tracing_readme_fops = {
3566         .open           = tracing_open_generic,
3567         .read           = tracing_readme_read,
3568         .llseek         = generic_file_llseek,
3569 };
3570
3571 static ssize_t
3572 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3573                                 size_t cnt, loff_t *ppos)
3574 {
3575         char *buf_comm;
3576         char *file_buf;
3577         char *buf;
3578         int len = 0;
3579         int pid;
3580         int i;
3581
3582         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3583         if (!file_buf)
3584                 return -ENOMEM;
3585
3586         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3587         if (!buf_comm) {
3588                 kfree(file_buf);
3589                 return -ENOMEM;
3590         }
3591
3592         buf = file_buf;
3593
3594         for (i = 0; i < SAVED_CMDLINES; i++) {
3595                 int r;
3596
3597                 pid = map_cmdline_to_pid[i];
3598                 if (pid == -1 || pid == NO_CMDLINE_MAP)
3599                         continue;
3600
3601                 trace_find_cmdline(pid, buf_comm);
3602                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3603                 buf += r;
3604                 len += r;
3605         }
3606
3607         len = simple_read_from_buffer(ubuf, cnt, ppos,
3608                                       file_buf, len);
3609
3610         kfree(file_buf);
3611         kfree(buf_comm);
3612
3613         return len;
3614 }
3615
3616 static const struct file_operations tracing_saved_cmdlines_fops = {
3617     .open       = tracing_open_generic,
3618     .read       = tracing_saved_cmdlines_read,
3619     .llseek     = generic_file_llseek,
3620 };
3621
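/*
 * Each line produced by tracing_saved_cmdlines_read() above is a
 * "<pid> <comm>" pair taken from the cmdline cache, e.g. (path assumes
 * the usual debugfs mount point):
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 */
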
3622 static ssize_t
3623 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3624                        size_t cnt, loff_t *ppos)
3625 {
3626         struct trace_array *tr = filp->private_data;
3627         char buf[MAX_TRACER_SIZE+2];
3628         int r;
3629
3630         mutex_lock(&trace_types_lock);
3631         r = sprintf(buf, "%s\n", tr->current_trace->name);
3632         mutex_unlock(&trace_types_lock);
3633
3634         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3635 }
3636
3637 int tracer_init(struct tracer *t, struct trace_array *tr)
3638 {
3639         tracing_reset_online_cpus(&tr->trace_buffer);
3640         return t->init(tr);
3641 }
3642
3643 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3644 {
3645         int cpu;
3646
3647         for_each_tracing_cpu(cpu)
3648                 per_cpu_ptr(buf->data, cpu)->entries = val;
3649 }
3650
3651 #ifdef CONFIG_TRACER_MAX_TRACE
3652 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3653 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3654                                         struct trace_buffer *size_buf, int cpu_id)
3655 {
3656         int cpu, ret = 0;
3657
3658         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3659                 for_each_tracing_cpu(cpu) {
3660                         ret = ring_buffer_resize(trace_buf->buffer,
3661                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3662                         if (ret < 0)
3663                                 break;
3664                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3665                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3666                 }
3667         } else {
3668                 ret = ring_buffer_resize(trace_buf->buffer,
3669                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3670                 if (ret == 0)
3671                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3672                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3673         }
3674
3675         return ret;
3676 }
3677 #endif /* CONFIG_TRACER_MAX_TRACE */
3678
3679 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3680                                         unsigned long size, int cpu)
3681 {
3682         int ret;
3683
3684         /*
3685          * If kernel or user changes the size of the ring buffer
3686          * we use the size that was given, and we can forget about
3687          * expanding it later.
3688          */
3689         ring_buffer_expanded = true;
3690
3691         /* May be called before buffers are initialized */
3692         if (!tr->trace_buffer.buffer)
3693                 return 0;
3694
3695         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3696         if (ret < 0)
3697                 return ret;
3698
3699 #ifdef CONFIG_TRACER_MAX_TRACE
3700         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3701             !tr->current_trace->use_max_tr)
3702                 goto out;
3703
3704         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3705         if (ret < 0) {
3706                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3707                                                      &tr->trace_buffer, cpu);
3708                 if (r < 0) {
3709                         /*
3710                          * AARGH! We are left with a differently
3711                          * sized max buffer!!!!
3712                          * The max buffer is our "snapshot" buffer.
3713                          * When a tracer needs a snapshot (one of the
3714                          * latency tracers), it swaps the max buffer
3715                          * with the saved snapshot. We managed to
3716                          * update the size of the main buffer, but failed to
3717                          * update the size of the max buffer. Then, when we tried
3718                          * to reset the main buffer to the original size, we
3719                          * failed there too. This is very unlikely to
3720                          * happen, but if it does, warn and kill all
3721                          * tracing.
3722                          */
3723                         WARN_ON(1);
3724                         tracing_disabled = 1;
3725                 }
3726                 return ret;
3727         }
3728
3729         if (cpu == RING_BUFFER_ALL_CPUS)
3730                 set_buffer_entries(&tr->max_buffer, size);
3731         else
3732                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3733
3734  out:
3735 #endif /* CONFIG_TRACER_MAX_TRACE */
3736
3737         if (cpu == RING_BUFFER_ALL_CPUS)
3738                 set_buffer_entries(&tr->trace_buffer, size);
3739         else
3740                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3741
3742         return ret;
3743 }
3744
3745 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3746                                           unsigned long size, int cpu_id)
3747 {
3748         int ret = size;
3749
3750         mutex_lock(&trace_types_lock);
3751
3752         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3753                 /* make sure this cpu is enabled in the mask */
3754                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3755                         ret = -EINVAL;
3756                         goto out;
3757                 }
3758         }
3759
3760         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3761         if (ret < 0)
3762                 ret = -ENOMEM;
3763
3764 out:
3765         mutex_unlock(&trace_types_lock);
3766
3767         return ret;
3768 }
3769
3770
3771 /**
3772  * tracing_update_buffers - used by tracing facility to expand ring buffers
3773  *
3774  * To save memory when tracing is never used on a system that has it
3775  * configured in, the ring buffers are set to a minimum size. Once
3776  * a user starts to use the tracing facility, they need to grow
3777  * to their default size.
3778  *
3779  * This function is to be called when a tracer is about to be used.
3780  */
3781 int tracing_update_buffers(void)
3782 {
3783         int ret = 0;
3784
3785         mutex_lock(&trace_types_lock);
3786         if (!ring_buffer_expanded)
3787                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3788                                                 RING_BUFFER_ALL_CPUS);
3789         mutex_unlock(&trace_types_lock);
3790
3791         return ret;
3792 }
3793
3794 struct trace_option_dentry;
3795
3796 static struct trace_option_dentry *
3797 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3798
3799 static void
3800 destroy_trace_option_files(struct trace_option_dentry *topts);
3801
3802 static int tracing_set_tracer(const char *buf)
3803 {
3804         static struct trace_option_dentry *topts;
3805         struct trace_array *tr = &global_trace;
3806         struct tracer *t;
3807 #ifdef CONFIG_TRACER_MAX_TRACE
3808         bool had_max_tr;
3809 #endif
3810         int ret = 0;
3811
3812         mutex_lock(&trace_types_lock);
3813
3814         if (!ring_buffer_expanded) {
3815                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3816                                                 RING_BUFFER_ALL_CPUS);
3817                 if (ret < 0)
3818                         goto out;
3819                 ret = 0;
3820         }
3821
3822         for (t = trace_types; t; t = t->next) {
3823                 if (strcmp(t->name, buf) == 0)
3824                         break;
3825         }
3826         if (!t) {
3827                 ret = -EINVAL;
3828                 goto out;
3829         }
3830         if (t == tr->current_trace)
3831                 goto out;
3832
3833         trace_branch_disable();
3834
3835         tr->current_trace->enabled = false;
3836
3837         if (tr->current_trace->reset)
3838                 tr->current_trace->reset(tr);
3839
3840         /* Current trace needs to be nop_trace before synchronize_sched */
3841         tr->current_trace = &nop_trace;
3842
3843 #ifdef CONFIG_TRACER_MAX_TRACE
3844         had_max_tr = tr->allocated_snapshot;
3845
3846         if (had_max_tr && !t->use_max_tr) {
3847                 /*
3848                  * We need to make sure that the update_max_tr sees that
3849                  * current_trace changed to nop_trace to keep it from
3850                  * swapping the buffers after we resize it.
3851                  * update_max_tr() is called with interrupts disabled,
3852                  * so a synchronize_sched() is sufficient.
3853                  */
3854                 synchronize_sched();
3855                 free_snapshot(tr);
3856         }
3857 #endif
3858         destroy_trace_option_files(topts);
3859
3860         topts = create_trace_option_files(tr, t);
3861
3862 #ifdef CONFIG_TRACER_MAX_TRACE
3863         if (t->use_max_tr && !had_max_tr) {
3864                 ret = alloc_snapshot(tr);
3865                 if (ret < 0)
3866                         goto out;
3867         }
3868 #endif
3869
3870         if (t->init) {
3871                 ret = tracer_init(t, tr);
3872                 if (ret)
3873                         goto out;
3874         }
3875
3876         tr->current_trace = t;
3877         tr->current_trace->enabled = true;
3878         trace_branch_enable(tr);
3879  out:
3880         mutex_unlock(&trace_types_lock);
3881
3882         return ret;
3883 }
3884
3885 static ssize_t
3886 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3887                         size_t cnt, loff_t *ppos)
3888 {
3889         char buf[MAX_TRACER_SIZE+1];
3890         int i;
3891         size_t ret;
3892         int err;
3893
3894         ret = cnt;
3895
3896         if (cnt > MAX_TRACER_SIZE)
3897                 cnt = MAX_TRACER_SIZE;
3898
3899         if (copy_from_user(&buf, ubuf, cnt))
3900                 return -EFAULT;
3901
3902         buf[cnt] = 0;
3903
3904         /* strip trailing whitespace. */
3905         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3906                 buf[i] = 0;
3907
3908         err = tracing_set_tracer(buf);
3909         if (err)
3910                 return err;
3911
3912         *ppos += ret;
3913
3914         return ret;
3915 }
3916
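/*
 * The read/write handlers above back the "current_tracer" file, so
 * switching tracers from user-space is simply (valid names are listed
 * by available_tracers):
 *
 *   # cat /sys/kernel/debug/tracing/available_tracers
 *   # echo nop > /sys/kernel/debug/tracing/current_tracer
 */
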
3917 static ssize_t
3918 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3919                      size_t cnt, loff_t *ppos)
3920 {
3921         unsigned long *ptr = filp->private_data;
3922         char buf[64];
3923         int r;
3924
3925         r = snprintf(buf, sizeof(buf), "%ld\n",
3926                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3927         if (r > sizeof(buf))
3928                 r = sizeof(buf);
3929         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3930 }
3931
3932 static ssize_t
3933 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3934                       size_t cnt, loff_t *ppos)
3935 {
3936         unsigned long *ptr = filp->private_data;
3937         unsigned long val;
3938         int ret;
3939
3940         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3941         if (ret)
3942                 return ret;
3943
3944         *ptr = val * 1000;
3945
3946         return cnt;
3947 }
3948
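/*
 * Note the unit conversion in the pair above: the file is in microseconds
 * while *ptr is kept in nanoseconds, hence the "val * 1000" on write and
 * nsecs_to_usecs() on read. A sketch of resetting the recorded maximum:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */
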
3949 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3950 {
3951         struct trace_cpu *tc = inode->i_private;
3952         struct trace_array *tr = tc->tr;
3953         struct trace_iterator *iter;
3954         int ret = 0;
3955
3956         if (tracing_disabled)
3957                 return -ENODEV;
3958
3959         if (trace_array_get(tr) < 0)
3960                 return -ENODEV;
3961
3962         mutex_lock(&trace_types_lock);
3963
3964         /* create a buffer to store the information to pass to userspace */
3965         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3966         if (!iter) {
3967                 ret = -ENOMEM;
3968                 __trace_array_put(tr);
3969                 goto out;
3970         }
3971
3972         /*
3973          * We make a copy of the current tracer to avoid concurrent
3974          * changes on it while we are reading.
3975          */
3976         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3977         if (!iter->trace) {
3978                 ret = -ENOMEM;
3979                 goto fail;
3980         }
3981         *iter->trace = *tr->current_trace;
3982
3983         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3984                 ret = -ENOMEM;
3985                 goto fail;
3986         }
3987
3988         /* trace pipe does not show start of buffer */
3989         cpumask_setall(iter->started);
3990
3991         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3992                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3993
3994         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3995         if (trace_clocks[tr->clock_id].in_ns)
3996                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3997
3998         iter->cpu_file = tc->cpu;
3999         iter->tr = tc->tr;
4000         iter->trace_buffer = &tc->tr->trace_buffer;
4001         mutex_init(&iter->mutex);
4002         filp->private_data = iter;
4003
4004         if (iter->trace->pipe_open)
4005                 iter->trace->pipe_open(iter);
4006
4007         nonseekable_open(inode, filp);
4008 out:
4009         mutex_unlock(&trace_types_lock);
4010         return ret;
4011
4012 fail:
4013         kfree(iter->trace);
4014         kfree(iter);
4015         __trace_array_put(tr);
4016         mutex_unlock(&trace_types_lock);
4017         return ret;
4018 }
4019
4020 static int tracing_release_pipe(struct inode *inode, struct file *file)
4021 {
4022         struct trace_iterator *iter = file->private_data;
4023         struct trace_cpu *tc = inode->i_private;
4024         struct trace_array *tr = tc->tr;
4025
4026         mutex_lock(&trace_types_lock);
4027
4028         if (iter->trace->pipe_close)
4029                 iter->trace->pipe_close(iter);
4030
4031         mutex_unlock(&trace_types_lock);
4032
4033         free_cpumask_var(iter->started);
4034         mutex_destroy(&iter->mutex);
4035         kfree(iter->trace);
4036         kfree(iter);
4037
4038         trace_array_put(tr);
4039
4040         return 0;
4041 }
4042
4043 static unsigned int
4044 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4045 {
4046         /* Iterators are static; they should be either filled or empty */
4047         if (trace_buffer_iter(iter, iter->cpu_file))
4048                 return POLLIN | POLLRDNORM;
4049
4050         if (trace_flags & TRACE_ITER_BLOCK)
4051                 /*
4052                  * Always select as readable when in blocking mode
4053                  */
4054                 return POLLIN | POLLRDNORM;
4055         else
4056                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4057                                              filp, poll_table);
4058 }
4059
4060 static unsigned int
4061 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4062 {
4063         struct trace_iterator *iter = filp->private_data;
4064
4065         return trace_poll(iter, filp, poll_table);
4066 }
4067
4068 /*
4069  * This is a makeshift waitqueue.
4070  * A tracer might use this callback in some rare cases:
4071  *
4072  *  1) the current tracer might hold the runqueue lock when it wakes up
4073  *     a reader, hence a deadlock (sched, function, and function graph tracers)
4074  *  2) the function tracers trace all functions, and we don't want
4075  *     the overhead of calling wake_up and friends
4076  *     (and tracing them too)
4077  *
4078  * Anyway, this really is a very primitive wakeup.
4079  */
4080 void poll_wait_pipe(struct trace_iterator *iter)
4081 {
4082         set_current_state(TASK_INTERRUPTIBLE);
4083         /* sleep for 100 msecs, and try again. */
4084         schedule_timeout(HZ / 10);
4085 }
4086
4087 /* Must be called with iter->mutex held. */
4088 static int tracing_wait_pipe(struct file *filp)
4089 {
4090         struct trace_iterator *iter = filp->private_data;
4091
4092         while (trace_empty(iter)) {
4093
4094                 if ((filp->f_flags & O_NONBLOCK)) {
4095                         return -EAGAIN;
4096                 }
4097
4098                 mutex_unlock(&iter->mutex);
4099
4100                 iter->trace->wait_pipe(iter);
4101
4102                 mutex_lock(&iter->mutex);
4103
4104                 if (signal_pending(current))
4105                         return -EINTR;
4106
4107                 /*
4108                  * We break out only once we have read something and tracing
4109                  * has been disabled. We keep blocking if tracing is disabled
4110                  * but we have never read anything: this allows a user to cat
4111                  * this file, then enable tracing. After we have read
4112                  * something, we give an EOF when tracing is again disabled.
4113                  *
4114                  * iter->pos will be 0 if we haven't read anything.
4115                  */
4116                 if (!tracing_is_on() && iter->pos)
4117                         break;
4118         }
4119
4120         return 1;
4121 }
4122
4123 /*
4124  * Consumer reader.
4125  */
4126 static ssize_t
4127 tracing_read_pipe(struct file *filp, char __user *ubuf,
4128                   size_t cnt, loff_t *ppos)
4129 {
4130         struct trace_iterator *iter = filp->private_data;
4131         struct trace_array *tr = iter->tr;
4132         ssize_t sret;
4133
4134         /* return any leftover data */
4135         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4136         if (sret != -EBUSY)
4137                 return sret;
4138
4139         trace_seq_init(&iter->seq);
4140
4141         /* copy the tracer to avoid using a global lock all around */
4142         mutex_lock(&trace_types_lock);
4143         if (unlikely(iter->trace->name != tr->current_trace->name))
4144                 *iter->trace = *tr->current_trace;
4145         mutex_unlock(&trace_types_lock);
4146
4147         /*
4148          * Avoid more than one consumer on a single file descriptor
4149          * This is just a matter of traces coherency, the ring buffer itself
4150          * is protected.
4151          */
4152         mutex_lock(&iter->mutex);
4153         if (iter->trace->read) {
4154                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4155                 if (sret)
4156                         goto out;
4157         }
4158
4159 waitagain:
4160         sret = tracing_wait_pipe(filp);
4161         if (sret <= 0)
4162                 goto out;
4163
4164         /* stop when tracing is finished */
4165         if (trace_empty(iter)) {
4166                 sret = 0;
4167                 goto out;
4168         }
4169
4170         if (cnt >= PAGE_SIZE)
4171                 cnt = PAGE_SIZE - 1;
4172
4173         /* reset all but tr, trace, and overruns */
4174         memset(&iter->seq, 0,
4175                sizeof(struct trace_iterator) -
4176                offsetof(struct trace_iterator, seq));
4177         iter->pos = -1;
4178
4179         trace_event_read_lock();
4180         trace_access_lock(iter->cpu_file);
4181         while (trace_find_next_entry_inc(iter) != NULL) {
4182                 enum print_line_t ret;
4183                 int len = iter->seq.len;
4184
4185                 ret = print_trace_line(iter);
4186                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4187                         /* don't print partial lines */
4188                         iter->seq.len = len;
4189                         break;
4190                 }
4191                 if (ret != TRACE_TYPE_NO_CONSUME)
4192                         trace_consume(iter);
4193
4194                 if (iter->seq.len >= cnt)
4195                         break;
4196
4197                 /*
4198                  * Setting the full flag means we reached the trace_seq buffer
4199                  * size and we should have left via the partial output condition
4200                  * above. One of the trace_seq_* functions is not used properly.
4201                  */
4202                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4203                           iter->ent->type);
4204         }
4205         trace_access_unlock(iter->cpu_file);
4206         trace_event_read_unlock();
4207
4208         /* Now copy what we have to the user */
4209         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4210         if (iter->seq.readpos >= iter->seq.len)
4211                 trace_seq_init(&iter->seq);
4212
4213         /*
4214          * If there was nothing to send to user, in spite of consuming trace
4215          * entries, go back to wait for more entries.
4216          */
4217         if (sret == -EBUSY)
4218                 goto waitagain;
4219
4220 out:
4221         mutex_unlock(&iter->mutex);
4222
4223         return sret;
4224 }
4225
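/*
 * This consuming read backs "trace_pipe" (see readme_msg above): unlike
 * the "trace" file, entries returned here are consumed from the buffer,
 * so e.g.
 *
 *   # cat /sys/kernel/debug/tracing/trace_pipe
 *
 * blocks until data arrives and never shows the same entry twice.
 */
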
4226 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4227                                      struct pipe_buffer *buf)
4228 {
4229         __free_page(buf->page);
4230 }
4231
4232 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4233                                      unsigned int idx)
4234 {
4235         __free_page(spd->pages[idx]);
4236 }
4237
4238 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4239         .can_merge              = 0,
4240         .map                    = generic_pipe_buf_map,
4241         .unmap                  = generic_pipe_buf_unmap,
4242         .confirm                = generic_pipe_buf_confirm,
4243         .release                = tracing_pipe_buf_release,
4244         .steal                  = generic_pipe_buf_steal,
4245         .get                    = generic_pipe_buf_get,
4246 };
4247
4248 static size_t
4249 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4250 {
4251         size_t count;
4252         int ret;
4253
4254         /* Seq buffer is page-sized, exactly what we need. */
4255         for (;;) {
4256                 count = iter->seq.len;
4257                 ret = print_trace_line(iter);
4258                 count = iter->seq.len - count;
4259                 if (rem < count) {
4260                         rem = 0;
4261                         iter->seq.len -= count;
4262                         break;
4263                 }
4264                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4265                         iter->seq.len -= count;
4266                         break;
4267                 }
4268
4269                 if (ret != TRACE_TYPE_NO_CONSUME)
4270                         trace_consume(iter);
4271                 rem -= count;
4272                 if (!trace_find_next_entry_inc(iter))   {
4273                         rem = 0;
4274                         iter->ent = NULL;
4275                         break;
4276                 }
4277         }
4278
4279         return rem;
4280 }
4281
4282 static ssize_t tracing_splice_read_pipe(struct file *filp,
4283                                         loff_t *ppos,
4284                                         struct pipe_inode_info *pipe,
4285                                         size_t len,
4286                                         unsigned int flags)
4287 {
4288         struct page *pages_def[PIPE_DEF_BUFFERS];
4289         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4290         struct trace_iterator *iter = filp->private_data;
4291         struct splice_pipe_desc spd = {
4292                 .pages          = pages_def,
4293                 .partial        = partial_def,
4294                 .nr_pages       = 0, /* This gets updated below. */
4295                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4296                 .flags          = flags,
4297                 .ops            = &tracing_pipe_buf_ops,
4298                 .spd_release    = tracing_spd_release_pipe,
4299         };
4300         struct trace_array *tr = iter->tr;
4301         ssize_t ret;
4302         size_t rem;
4303         unsigned int i;
4304
4305         if (splice_grow_spd(pipe, &spd))
4306                 return -ENOMEM;
4307
4308         /* copy the tracer to avoid using a global lock all around */
4309         mutex_lock(&trace_types_lock);
4310         if (unlikely(iter->trace->name != tr->current_trace->name))
4311                 *iter->trace = *tr->current_trace;
4312         mutex_unlock(&trace_types_lock);
4313
4314         mutex_lock(&iter->mutex);
4315
4316         if (iter->trace->splice_read) {
4317                 ret = iter->trace->splice_read(iter, filp,
4318                                                ppos, pipe, len, flags);
4319                 if (ret)
4320                         goto out_err;
4321         }
4322
4323         ret = tracing_wait_pipe(filp);
4324         if (ret <= 0)
4325                 goto out_err;
4326
4327         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4328                 ret = -EFAULT;
4329                 goto out_err;
4330         }
4331
4332         trace_event_read_lock();
4333         trace_access_lock(iter->cpu_file);
4334
4335         /* Fill as many pages as possible. */
4336         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4337                 spd.pages[i] = alloc_page(GFP_KERNEL);
4338                 if (!spd.pages[i])
4339                         break;
4340
4341                 rem = tracing_fill_pipe_page(rem, iter);
4342
4343                 /* Copy the data into the page, so we can start over. */
4344                 ret = trace_seq_to_buffer(&iter->seq,
4345                                           page_address(spd.pages[i]),
4346                                           iter->seq.len);
4347                 if (ret < 0) {
4348                         __free_page(spd.pages[i]);
4349                         break;
4350                 }
4351                 spd.partial[i].offset = 0;
4352                 spd.partial[i].len = iter->seq.len;
4353
4354                 trace_seq_init(&iter->seq);
4355         }
4356
4357         trace_access_unlock(iter->cpu_file);
4358         trace_event_read_unlock();
4359         mutex_unlock(&iter->mutex);
4360
4361         spd.nr_pages = i;
4362
4363         ret = splice_to_pipe(pipe, &spd);
4364 out:
4365         splice_shrink_spd(&spd);
4366         return ret;
4367
4368 out_err:
4369         mutex_unlock(&iter->mutex);
4370         goto out;
4371 }
4372
4373 static ssize_t
4374 tracing_entries_read(struct file *filp, char __user *ubuf,
4375                      size_t cnt, loff_t *ppos)
4376 {
4377         struct trace_cpu *tc = filp->private_data;
4378         struct trace_array *tr = tc->tr;
4379         char buf[64];
4380         int r = 0;
4381         ssize_t ret;
4382
4383         mutex_lock(&trace_types_lock);
4384
4385         if (tc->cpu == RING_BUFFER_ALL_CPUS) {
4386                 int cpu, buf_size_same;
4387                 unsigned long size;
4388
4389                 size = 0;
4390                 buf_size_same = 1;
4391                 /* check if all cpu sizes are the same */
4392                 for_each_tracing_cpu(cpu) {
4393                         /* fill in the size from the first enabled cpu */
4394                         if (size == 0)
4395                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4396                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4397                                 buf_size_same = 0;
4398                                 break;
4399                         }
4400                 }
4401
4402                 if (buf_size_same) {
4403                         if (!ring_buffer_expanded)
4404                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4405                                             size >> 10,
4406                                             trace_buf_size >> 10);
4407                         else
4408                                 r = sprintf(buf, "%lu\n", size >> 10);
4409                 } else
4410                         r = sprintf(buf, "X\n");
4411         } else
4412                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
4413
4414         mutex_unlock(&trace_types_lock);
4415
4416         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4417         return ret;
4418 }
4419
4420 static ssize_t
4421 tracing_entries_write(struct file *filp, const char __user *ubuf,
4422                       size_t cnt, loff_t *ppos)
4423 {
4424         struct trace_cpu *tc = filp->private_data;
4425         unsigned long val;
4426         int ret;
4427
4428         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4429         if (ret)
4430                 return ret;
4431
4432         /* must have at least 1 entry */
4433         if (!val)
4434                 return -EINVAL;
4435
4436         /* value is in KB */
4437         val <<= 10;
4438
4439         ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
4440         if (ret < 0)
4441                 return ret;
4442
4443         *ppos += cnt;
4444
4445         return cnt;
4446 }
4447
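/*
 * Sketch of the buffer_size_kb interface implemented above (the value is
 * in KB, hence the "val <<= 10" before resizing):
 *
 *   # echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * A per-cpu instance of this file resizes only that CPU's buffer
 * (tc->cpu selects which) instead of all of them.
 */
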
4448 static ssize_t
4449 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4450                                 size_t cnt, loff_t *ppos)
4451 {
4452         struct trace_array *tr = filp->private_data;
4453         char buf[64];
4454         int r, cpu;
4455         unsigned long size = 0, expanded_size = 0;
4456
4457         mutex_lock(&trace_types_lock);
4458         for_each_tracing_cpu(cpu) {
4459                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4460                 if (!ring_buffer_expanded)
4461                         expanded_size += trace_buf_size >> 10;
4462         }
4463         if (ring_buffer_expanded)
4464                 r = sprintf(buf, "%lu\n", size);
4465         else
4466                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4467         mutex_unlock(&trace_types_lock);
4468
4469         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4470 }
4471
4472 static ssize_t
4473 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4474                           size_t cnt, loff_t *ppos)
4475 {
4476         /*
4477          * There is no need to read what the user has written; this function
4478          * exists just so that an "echo" into this file does not return an error
4479          */
4480
4481         *ppos += cnt;
4482
4483         return cnt;
4484 }
4485
4486 static int
4487 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4488 {
4489         struct trace_array *tr = inode->i_private;
4490
4491         /* disable tracing? */
4492         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4493                 tracing_off();
4494         /* resize the ring buffer to 0 */
4495         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4496
4497         trace_array_put(tr);
4498
4499         return 0;
4500 }
4501
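/*
 * Illustration of the "free_buffer" semantics above: any write succeeds,
 * and the ring buffer is shrunk to zero when the file is released, e.g.:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/free_buffer
 *
 * If TRACE_ITER_STOP_ON_FREE is set, tracing is turned off first.
 */
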
4502 static ssize_t
4503 tracing_mark_write(struct file *filp, const char __user *ubuf,
4504                                         size_t cnt, loff_t *fpos)
4505 {
4506         unsigned long addr = (unsigned long)ubuf;
4507         struct trace_array *tr = filp->private_data;
4508         struct ring_buffer_event *event;
4509         struct ring_buffer *buffer;
4510         struct print_entry *entry;
4511         unsigned long irq_flags;
4512         struct page *pages[2];
4513         void *map_page[2];
4514         int nr_pages = 1;
4515         ssize_t written;
4516         int offset;
4517         int size;
4518         int len;
4519         int ret;
4520         int i;
4521
4522         if (tracing_disabled)
4523                 return -EINVAL;
4524
4525         if (!(trace_flags & TRACE_ITER_MARKERS))
4526                 return -EINVAL;
4527
4528         if (cnt > TRACE_BUF_SIZE)
4529                 cnt = TRACE_BUF_SIZE;
4530
4531         /*
4532          * Userspace is injecting traces into the kernel trace buffer.
4533          * We want to be as non-intrusive as possible.
4534          * To do so, we do not want to allocate any special buffers
4535          * or take any locks, but instead write the userspace data
4536          * straight into the ring buffer.
4537          *
4538          * First we need to pin the userspace buffer into memory,
4539          * which most likely it already is, because userspace just referenced it.
4540          * But there's no guarantee that it is. By using get_user_pages_fast()
4541          * and kmap_atomic/kunmap_atomic() we can get access to the
4542          * pages directly. We then write the data directly into the
4543          * ring buffer.
4544          */
4545         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4546
4547         /* check if we cross pages */
4548         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4549                 nr_pages = 2;
4550
4551         offset = addr & (PAGE_SIZE - 1);
4552         addr &= PAGE_MASK;
4553
4554         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4555         if (ret < nr_pages) {
4556                 while (--ret >= 0)
4557                         put_page(pages[ret]);
4558                 written = -EFAULT;
4559                 goto out;
4560         }
4561
4562         for (i = 0; i < nr_pages; i++)
4563                 map_page[i] = kmap_atomic(pages[i]);
4564
4565         local_save_flags(irq_flags);
4566         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4567         buffer = tr->trace_buffer.buffer;
4568         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4569                                           irq_flags, preempt_count());
4570         if (!event) {
4571                 /* Ring buffer disabled, return as if not open for write */
4572                 written = -EBADF;
4573                 goto out_unlock;
4574         }
4575
4576         entry = ring_buffer_event_data(event);
4577         entry->ip = _THIS_IP_;
4578
4579         if (nr_pages == 2) {
4580                 len = PAGE_SIZE - offset;
4581                 memcpy(&entry->buf, map_page[0] + offset, len);
4582                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4583         } else
4584                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4585
4586         if (entry->buf[cnt - 1] != '\n') {
4587                 entry->buf[cnt] = '\n';
4588                 entry->buf[cnt + 1] = '\0';
4589         } else
4590                 entry->buf[cnt] = '\0';
4591
4592         __buffer_unlock_commit(buffer, event);
4593
4594         written = cnt;
4595
4596         *fpos += written;
4597
4598  out_unlock:
4599         for (i = 0; i < nr_pages; i++){
4600                 kunmap_atomic(map_page[i]);
4601                 put_page(pages[i]);
4602         }
4603  out:
4604         return written;
4605 }
4606
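/*
 * The page-pinning fast path above is what makes "trace_marker" cheap
 * enough to annotate timing-sensitive user-space code, e.g.:
 *
 *   # echo hello > /sys/kernel/debug/tracing/trace_marker
 *
 * The string shows up in the trace as a print entry stamped at _THIS_IP_.
 */
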
4607 static int tracing_clock_show(struct seq_file *m, void *v)
4608 {
4609         struct trace_array *tr = m->private;
4610         int i;
4611
4612         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4613                 seq_printf(m,
4614                         "%s%s%s%s", i ? " " : "",
4615                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4616                         i == tr->clock_id ? "]" : "");
4617         seq_putc(m, '\n');
4618
4619         return 0;
4620 }
4621
4622 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4623                                    size_t cnt, loff_t *fpos)
4624 {
4625         struct seq_file *m = filp->private_data;
4626         struct trace_array *tr = m->private;
4627         char buf[64];
4628         const char *clockstr;
4629         int i;
4630
4631         if (cnt >= sizeof(buf))
4632                 return -EINVAL;
4633
4634         if (copy_from_user(&buf, ubuf, cnt))
4635                 return -EFAULT;
4636
4637         buf[cnt] = 0;
4638
4639         clockstr = strstrip(buf);
4640
4641         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4642                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4643                         break;
4644         }
4645         if (i == ARRAY_SIZE(trace_clocks))
4646                 return -EINVAL;
4647
4648         mutex_lock(&trace_types_lock);
4649
4650         tr->clock_id = i;
4651
4652         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4653
4654         /*
4655          * The new clock may not be consistent with the previous clock.
4656          * Reset the buffer so that it doesn't have incomparable timestamps.
4657          */
4658         tracing_reset_online_cpus(&tr->trace_buffer);
4659
4660 #ifdef CONFIG_TRACER_MAX_TRACE
4661         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4662                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4663         tracing_reset_online_cpus(&tr->max_buffer);
4664 #endif
4665
4666         mutex_unlock(&trace_types_lock);
4667
4668         *fpos += cnt;
4669
4670         return cnt;
4671 }
4672
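/*
 * Clock selection example (names come from the trace_clocks array; the
 * current clock is shown in brackets by tracing_clock_show() above):
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Switching clocks resets the buffers, since timestamps taken from
 * different clocks are not comparable.
 */
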
4673 static int tracing_clock_open(struct inode *inode, struct file *file)
4674 {
4675         struct trace_array *tr = inode->i_private;
4676         int ret;
4677
4678         if (tracing_disabled)
4679                 return -ENODEV;
4680
4681         if (trace_array_get(tr))
4682                 return -ENODEV;
4683
4684         ret = single_open(file, tracing_clock_show, inode->i_private);
4685         if (ret < 0)
4686                 trace_array_put(tr);
4687
4688         return ret;
4689 }
4690
4691 struct ftrace_buffer_info {
4692         struct trace_iterator   iter;
4693         void                    *spare;
4694         unsigned int            read;
4695 };
4696
4697 #ifdef CONFIG_TRACER_SNAPSHOT
4698 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4699 {
4700         struct trace_cpu *tc = inode->i_private;
4701         struct trace_array *tr = tc->tr;
4702         struct trace_iterator *iter;
4703         struct seq_file *m;
4704         int ret = 0;
4705
4706         if (trace_array_get(tr) < 0)
4707                 return -ENODEV;
4708
4709         if (file->f_mode & FMODE_READ) {
4710                 iter = __tracing_open(tr, tc, inode, file, true);
4711                 if (IS_ERR(iter))
4712                         ret = PTR_ERR(iter);
4713         } else {
4714                 /* Writes still need the seq_file to hold the private data */
4715                 ret = -ENOMEM;
4716                 m = kzalloc(sizeof(*m), GFP_KERNEL);
4717                 if (!m)
4718                         goto out;
4719                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4720                 if (!iter) {
4721                         kfree(m);
4722                         goto out;
4723                 }
4724                 ret = 0;
4725
4726                 iter->tr = tr;
4727                 iter->trace_buffer = &tc->tr->max_buffer;
4728                 iter->cpu_file = tc->cpu;
4729                 m->private = iter;
4730                 file->private_data = m;
4731         }
4732 out:
4733         if (ret < 0)
4734                 trace_array_put(tr);
4735
4736         return ret;
4737 }
4738
4739 static ssize_t
4740 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4741                        loff_t *ppos)
4742 {
4743         struct seq_file *m = filp->private_data;
4744         struct trace_iterator *iter = m->private;
4745         struct trace_array *tr = iter->tr;
4746         unsigned long val;
4747         int ret;
4748
4749         ret = tracing_update_buffers();
4750         if (ret < 0)
4751                 return ret;
4752
4753         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4754         if (ret)
4755                 return ret;
4756
4757         mutex_lock(&trace_types_lock);
4758
4759         if (tr->current_trace->use_max_tr) {
4760                 ret = -EBUSY;
4761                 goto out;
4762         }
4763
4764         switch (val) {
4765         case 0:
4766                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4767                         ret = -EINVAL;
4768                         break;
4769                 }
4770                 if (tr->allocated_snapshot)
4771                         free_snapshot(tr);
4772                 break;
4773         case 1:
4774 /* Only allow per-cpu swap if the ring buffer supports it */
4775 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4776                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4777                         ret = -EINVAL;
4778                         break;
4779                 }
4780 #endif
4781                 if (!tr->allocated_snapshot) {
4782                         ret = alloc_snapshot(tr);
4783                         if (ret < 0)
4784                                 break;
4785                 }
4786                 local_irq_disable();
4787                 /* Now, we're going to swap */
4788                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4789                         update_max_tr(tr, current, smp_processor_id());
4790                 else
4791                         update_max_tr_single(tr, current, iter->cpu_file);
4792                 local_irq_enable();
4793                 break;
4794         default:
4795                 if (tr->allocated_snapshot) {
4796                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4797                                 tracing_reset_online_cpus(&tr->max_buffer);
4798                         else
4799                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
4800                 }
4801                 break;
4802         }
4803
4804         if (ret >= 0) {
4805                 *ppos += cnt;
4806                 ret = cnt;
4807         }
4808 out:
4809         mutex_unlock(&trace_types_lock);
4810         return ret;
4811 }
4812
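/*
 * A short sketch of the control values handled above, written to the
 * "snapshot" file in the tracing debugfs directory:
 *
 *   echo 0 > snapshot : free the snapshot buffer (all-cpu file only)
 *   echo 1 > snapshot : allocate if needed, then swap in a snapshot
 *   echo 2 > snapshot : clear the snapshot buffer without freeing it
 *
 * (any value above 1 clears; 2 is just the conventional choice)
 */
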
4813 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4814 {
4815         struct seq_file *m = file->private_data;
4816         int ret;
4817
4818         ret = tracing_release(inode, file);
4819
4820         if (file->f_mode & FMODE_READ)
4821                 return ret;
4822
4823         /* If write only, the seq_file is just a stub */
4824         if (m)
4825                 kfree(m->private);
4826         kfree(m);
4827
4828         return 0;
4829 }
4830
4831 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4832 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4833                                     size_t count, loff_t *ppos);
4834 static int tracing_buffers_release(struct inode *inode, struct file *file);
4835 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4836                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4837
4838 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4839 {
4840         struct ftrace_buffer_info *info;
4841         int ret;
4842
4843         ret = tracing_buffers_open(inode, filp);
4844         if (ret < 0)
4845                 return ret;
4846
4847         info = filp->private_data;
4848
4849         if (info->iter.trace->use_max_tr) {
4850                 tracing_buffers_release(inode, filp);
4851                 return -EBUSY;
4852         }
4853
4854         info->iter.snapshot = true;
4855         info->iter.trace_buffer = &info->iter.tr->max_buffer;
4856
4857         return ret;
4858 }
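/*
 * Note that snapshot_raw_open() reuses the tracing_buffers_* handlers
 * but points the iterator at the max (snapshot) buffer, so reads of
 * per_cpu/cpuN/snapshot_raw return raw ring buffer pages from the most
 * recent snapshot, in the same binary format as trace_pipe_raw.
 */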
4859
4860 #endif /* CONFIG_TRACER_SNAPSHOT */
4861
4863 static const struct file_operations tracing_max_lat_fops = {
4864         .open           = tracing_open_generic,
4865         .read           = tracing_max_lat_read,
4866         .write          = tracing_max_lat_write,
4867         .llseek         = generic_file_llseek,
4868 };
4869
4870 static const struct file_operations set_tracer_fops = {
4871         .open           = tracing_open_generic,
4872         .read           = tracing_set_trace_read,
4873         .write          = tracing_set_trace_write,
4874         .llseek         = generic_file_llseek,
4875 };
4876
4877 static const struct file_operations tracing_pipe_fops = {
4878         .open           = tracing_open_pipe,
4879         .poll           = tracing_poll_pipe,
4880         .read           = tracing_read_pipe,
4881         .splice_read    = tracing_splice_read_pipe,
4882         .release        = tracing_release_pipe,
4883         .llseek         = no_llseek,
4884 };
4885
4886 static const struct file_operations tracing_entries_fops = {
4887         .open           = tracing_open_generic_tc,
4888         .read           = tracing_entries_read,
4889         .write          = tracing_entries_write,
4890         .llseek         = generic_file_llseek,
4891         .release        = tracing_release_generic_tc,
4892 };
4893
4894 static const struct file_operations tracing_total_entries_fops = {
4895         .open           = tracing_open_generic_tr,
4896         .read           = tracing_total_entries_read,
4897         .llseek         = generic_file_llseek,
4898         .release        = tracing_release_generic_tr,
4899 };
4900
4901 static const struct file_operations tracing_free_buffer_fops = {
4902         .open           = tracing_open_generic_tr,
4903         .write          = tracing_free_buffer_write,
4904         .release        = tracing_free_buffer_release,
4905 };
4906
4907 static const struct file_operations tracing_mark_fops = {
4908         .open           = tracing_open_generic_tr,
4909         .write          = tracing_mark_write,
4910         .llseek         = generic_file_llseek,
4911         .release        = tracing_release_generic_tr,
4912 };
4913
4914 static const struct file_operations trace_clock_fops = {
4915         .open           = tracing_clock_open,
4916         .read           = seq_read,
4917         .llseek         = seq_lseek,
4918         .release        = tracing_single_release_tr,
4919         .write          = tracing_clock_write,
4920 };
4921
4922 #ifdef CONFIG_TRACER_SNAPSHOT
4923 static const struct file_operations snapshot_fops = {
4924         .open           = tracing_snapshot_open,
4925         .read           = seq_read,
4926         .write          = tracing_snapshot_write,
4927         .llseek         = tracing_seek,
4928         .release        = tracing_snapshot_release,
4929 };
4930
4931 static const struct file_operations snapshot_raw_fops = {
4932         .open           = snapshot_raw_open,
4933         .read           = tracing_buffers_read,
4934         .release        = tracing_buffers_release,
4935         .splice_read    = tracing_buffers_splice_read,
4936         .llseek         = no_llseek,
4937 };
4938
4939 #endif /* CONFIG_TRACER_SNAPSHOT */
4940
4941 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4942 {
4943         struct trace_cpu *tc = inode->i_private;
4944         struct trace_array *tr = tc->tr;
4945         struct ftrace_buffer_info *info;
4946         int ret;
4947
4948         if (tracing_disabled)
4949                 return -ENODEV;
4950
4951         if (trace_array_get(tr) < 0)
4952                 return -ENODEV;
4953
4954         info = kzalloc(sizeof(*info), GFP_KERNEL);
4955         if (!info) {
4956                 trace_array_put(tr);
4957                 return -ENOMEM;
4958         }
4959
4960         mutex_lock(&trace_types_lock);
4961
4962         info->iter.tr           = tr;
4963         info->iter.cpu_file     = tc->cpu;
4964         info->iter.trace        = tr->current_trace;
4965         info->iter.trace_buffer = &tr->trace_buffer;
4966         info->spare             = NULL;
4967         /* Force reading the ring buffer on the first read */
4968         info->read              = (unsigned int)-1;
4969
4970         filp->private_data = info;
4971
4972         mutex_unlock(&trace_types_lock);
4973
4974         ret = nonseekable_open(inode, filp);
4975         if (ret < 0)
4976                 trace_array_put(tr);
4977
4978         return ret;
4979 }
4980
4981 static unsigned int
4982 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4983 {
4984         struct ftrace_buffer_info *info = filp->private_data;
4985         struct trace_iterator *iter = &info->iter;
4986
4987         return trace_poll(iter, filp, poll_table);
4988 }
4989
4990 static ssize_t
4991 tracing_buffers_read(struct file *filp, char __user *ubuf,
4992                      size_t count, loff_t *ppos)
4993 {
4994         struct ftrace_buffer_info *info = filp->private_data;
4995         struct trace_iterator *iter = &info->iter;
4996         ssize_t ret;
4997         ssize_t size;
4998
4999         if (!count)
5000                 return 0;
5001
5002         mutex_lock(&trace_types_lock);
5003
5004 #ifdef CONFIG_TRACER_MAX_TRACE
5005         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5006                 size = -EBUSY;
5007                 goto out_unlock;
5008         }
5009 #endif
5010
5011         if (!info->spare)
5012                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5013                                                           iter->cpu_file);
5014         size = -ENOMEM;
5015         if (!info->spare)
5016                 goto out_unlock;
5017
5018         /* Is there leftover data from a previous read? */
5019         if (info->read < PAGE_SIZE)
5020                 goto read;
5021
5022  again:
5023         trace_access_lock(iter->cpu_file);
5024         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5025                                     &info->spare,
5026                                     count,
5027                                     iter->cpu_file, 0);
5028         trace_access_unlock(iter->cpu_file);
5029
5030         if (ret < 0) {
5031                 if (trace_empty(iter)) {
5032                         if ((filp->f_flags & O_NONBLOCK)) {
5033                                 size = -EAGAIN;
5034                                 goto out_unlock;
5035                         }
5036                         mutex_unlock(&trace_types_lock);
5037                         iter->trace->wait_pipe(iter);
5038                         mutex_lock(&trace_types_lock);
5039                         if (signal_pending(current)) {
5040                                 size = -EINTR;
5041                                 goto out_unlock;
5042                         }
5043                         goto again;
5044                 }
5045                 size = 0;
5046                 goto out_unlock;
5047         }
5048
5049         info->read = 0;
5050  read:
5051         size = PAGE_SIZE - info->read;
5052         if (size > count)
5053                 size = count;
5054
5055         ret = copy_to_user(ubuf, info->spare + info->read, size);
5056         if (ret == size) {
5057                 size = -EFAULT;
5058                 goto out_unlock;
5059         }
5060         size -= ret;
5061
5062         *ppos += size;
5063         info->read += size;
5064
5065  out_unlock:
5066         mutex_unlock(&trace_types_lock);
5067
5068         return size;
5069 }
5070
5071 static int tracing_buffers_release(struct inode *inode, struct file *file)
5072 {
5073         struct ftrace_buffer_info *info = file->private_data;
5074         struct trace_iterator *iter = &info->iter;
5075
5076         mutex_lock(&trace_types_lock);
5077
5078         __trace_array_put(iter->tr);
5079
5080         if (info->spare)
5081                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5082         kfree(info);
5083
5084         mutex_unlock(&trace_types_lock);
5085
5086         return 0;
5087 }
5088
5089 struct buffer_ref {
5090         struct ring_buffer      *buffer;        /* ring buffer the page was read from */
5091         void                    *page;          /* page taken out of the ring buffer */
5092         int                     ref;            /* pipe buffers still referencing this page */
5093 };
5094
5095 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5096                                     struct pipe_buffer *buf)
5097 {
5098         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5099
5100         if (--ref->ref)
5101                 return;
5102
5103         ring_buffer_free_read_page(ref->buffer, ref->page);
5104         kfree(ref);
5105         buf->private = 0;
5106 }
5107
5108 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5109                                 struct pipe_buffer *buf)
5110 {
5111         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5112
5113         ref->ref++;
5114 }
5115
5116 /* Pipe buffer operations for a ring buffer page. */
5117 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5118         .can_merge              = 0,
5119         .map                    = generic_pipe_buf_map,
5120         .unmap                  = generic_pipe_buf_unmap,
5121         .confirm                = generic_pipe_buf_confirm,
5122         .release                = buffer_pipe_buf_release,
5123         .steal                  = generic_pipe_buf_steal,
5124         .get                    = buffer_pipe_buf_get,
5125 };
5126
5127 /*
5128  * Callback from splice_to_pipe(): release any leftover pages at the
5129  * end of the spd if we errored out while filling the pipe.
5130  */
5131 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5132 {
5133         struct buffer_ref *ref =
5134                 (struct buffer_ref *)spd->partial[i].private;
5135
5136         if (--ref->ref)
5137                 return;
5138
5139         ring_buffer_free_read_page(ref->buffer, ref->page);
5140         kfree(ref);
5141         spd->partial[i].private = 0;
5142 }
5143
5144 static ssize_t
5145 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5146                             struct pipe_inode_info *pipe, size_t len,
5147                             unsigned int flags)
5148 {
5149         struct ftrace_buffer_info *info = file->private_data;
5150         struct trace_iterator *iter = &info->iter;
5151         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5152         struct page *pages_def[PIPE_DEF_BUFFERS];
5153         struct splice_pipe_desc spd = {
5154                 .pages          = pages_def,
5155                 .partial        = partial_def,
5156                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5157                 .flags          = flags,
5158                 .ops            = &buffer_pipe_buf_ops,
5159                 .spd_release    = buffer_spd_release,
5160         };
5161         struct buffer_ref *ref;
5162         int entries, size, i;
5163         ssize_t ret;
5164
5165         mutex_lock(&trace_types_lock);
5166
5167 #ifdef CONFIG_TRACER_MAX_TRACE
5168         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5169                 ret = -EBUSY;
5170                 goto out;
5171         }
5172 #endif
5173
5174         if (splice_grow_spd(pipe, &spd)) {
5175                 ret = -ENOMEM;
5176                 goto out;
5177         }
5178
5179         if (*ppos & (PAGE_SIZE - 1)) {
5180                 ret = -EINVAL;
5181                 goto out;
5182         }
5183
5184         if (len & (PAGE_SIZE - 1)) {
5185                 if (len < PAGE_SIZE) {
5186                         ret = -EINVAL;
5187                         goto out;
5188                 }
5189                 len &= PAGE_MASK;
5190         }
5191
5192  again:
5193         trace_access_lock(iter->cpu_file);
5194         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5195
5196         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
5197                 struct page *page;
5198                 int r;
5199
5200                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5201                 if (!ref)
5202                         break;
5203
5204                 ref->ref = 1;
5205                 ref->buffer = iter->trace_buffer->buffer;
5206                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5207                 if (!ref->page) {
5208                         kfree(ref);
5209                         break;
5210                 }
5211
5212                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5213                                           len, iter->cpu_file, 1);
5214                 if (r < 0) {
5215                         ring_buffer_free_read_page(ref->buffer, ref->page);
5216                         kfree(ref);
5217                         break;
5218                 }
5219
5220                 /*
5221                  * Zero out any leftover data; this page is headed
5222                  * to user land.
5223                  */
5224                 size = ring_buffer_page_len(ref->page);
5225                 if (size < PAGE_SIZE)
5226                         memset(ref->page + size, 0, PAGE_SIZE - size);
5227
5228                 page = virt_to_page(ref->page);
5229
5230                 spd.pages[i] = page;
5231                 spd.partial[i].len = PAGE_SIZE;
5232                 spd.partial[i].offset = 0;
5233                 spd.partial[i].private = (unsigned long)ref;
5234                 spd.nr_pages++;
5235                 *ppos += PAGE_SIZE;
5236
5237                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5238         }
5239
5240         trace_access_unlock(iter->cpu_file);
5241         spd.nr_pages = i;
5242
5243         /* did we read anything? */
5244         if (!spd.nr_pages) {
5245                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5246                         ret = -EAGAIN;
5247                         goto out;
5248                 }
5249                 mutex_unlock(&trace_types_lock);
5250                 iter->trace->wait_pipe(iter);
5251                 mutex_lock(&trace_types_lock);
5252                 if (signal_pending(current)) {
5253                         ret = -EINTR;
5254                         goto out;
5255                 }
5256                 goto again;
5257         }
5258
5259         ret = splice_to_pipe(pipe, &spd);
5260         splice_shrink_spd(&spd);
5261 out:
5262         mutex_unlock(&trace_types_lock);
5263
5264         return ret;
5265 }
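/*
 * A minimal user-space consumer for the splice path above, as a sketch
 * (not part of the kernel build; the debugfs path and output file name
 * are assumptions). Transfers must be in page-sized chunks, matching
 * the *ppos and len alignment checks above, and the read blocks while
 * the ring buffer is empty:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			      O_RDONLY);
 *		int out = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *		long page = sysconf(_SC_PAGESIZE);
 *
 *		if (in < 0 || out < 0 || pipe(fds) < 0)
 *			return 1;
 *		// Move ring buffer pages into the pipe, then drain to the file.
 *		while (splice(in, NULL, fds[1], NULL, page, SPLICE_F_MOVE) > 0)
 *			splice(fds[0], NULL, out, NULL, page, SPLICE_F_MOVE);
 *		return 0;
 *	}
 */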
5266
5267 static const struct file_operations tracing_buffers_fops = {
5268         .open           = tracing_buffers_open,
5269         .read           = tracing_buffers_read,
5270         .poll           = tracing_buffers_poll,
5271         .release        = tracing_buffers_release,
5272         .splice_read    = tracing_buffers_splice_read,
5273         .llseek         = no_llseek,
5274 };
5275
5276 static ssize_t
5277 tracing_stats_read(struct file *filp, char __user *ubuf,
5278                    size_t count, loff_t *ppos)
5279 {
5280         struct trace_cpu *tc = filp->private_data;
5281         struct trace_array *tr = tc->tr;
5282         struct trace_buffer *trace_buf = &tr->trace_buffer;
5283         struct trace_seq *s;
5284         unsigned long cnt;
5285         unsigned long long t;
5286         unsigned long usec_rem;
5287         int cpu = tc->cpu;
5288
5289         s = kmalloc(sizeof(*s), GFP_KERNEL);
5290         if (!s)
5291                 return -ENOMEM;
5292
5293         trace_seq_init(s);
5294
5295         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5296         trace_seq_printf(s, "entries: %ld\n", cnt);
5297
5298         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5299         trace_seq_printf(s, "overrun: %ld\n", cnt);
5300
5301         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5302         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5303
5304         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5305         trace_seq_printf(s, "bytes: %ld\n", cnt);
5306
5307         if (trace_clocks[tr->clock_id].in_ns) {
5308                 /* local or global for trace_clock */
5309                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5310                 usec_rem = do_div(t, USEC_PER_SEC);
5311                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5312                                                                 t, usec_rem);
5313
5314                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5315                 usec_rem = do_div(t, USEC_PER_SEC);
5316                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5317         } else {
5318                 /* counter or tsc mode for trace_clock */
5319                 trace_seq_printf(s, "oldest event ts: %llu\n",
5320                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5321
5322                 trace_seq_printf(s, "now ts: %llu\n",
5323                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5324         }
5325
5326         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5327         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5328
5329         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5330         trace_seq_printf(s, "read events: %ld\n", cnt);
5331
5332         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5333
5334         kfree(s);
5335
5336         return count;
5337 }
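/*
 * Per the trace_seq_printf() calls above, reading per_cpu/cpuN/stats
 * yields output of this shape (values below are made up):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2571.960687
 *	now ts:  2577.306733
 *	dropped events: 0
 *	read events: 42
 *
 * With a counter or TSC trace clock the two timestamps print as raw
 * %llu values instead of sec.usec.
 */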
5338
5339 static const struct file_operations tracing_stats_fops = {
5340         .open           = tracing_open_generic_tc,
5341         .read           = tracing_stats_read,
5342         .llseek         = generic_file_llseek,
5343         .release        = tracing_release_generic_tc,
5344 };
5345
5346 #ifdef CONFIG_DYNAMIC_FTRACE
5347
5348 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5349 {
5350         return 0;
5351 }
5352
5353 static ssize_t
5354 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5355                   size_t cnt, loff_t *ppos)
5356 {
5357         static char ftrace_dyn_info_buffer[1024];
5358         static DEFINE_MUTEX(dyn_info_mutex);
5359         unsigned long *p = filp->private_data;
5360         char *buf = ftrace_dyn_info_buffer;
5361         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5362         int r;
5363
5364         mutex_lock(&dyn_info_mutex);
5365         r = sprintf(buf, "%ld ", *p);
5366
5367         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5368         buf[r++] = '\n';
5369
5370         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5371
5372         mutex_unlock(&dyn_info_mutex);
5373
5374         return r;
5375 }
5376
5377 static const struct file_operations tracing_dyn_info_fops = {
5378         .open           = tracing_open_generic,
5379         .read           = tracing_read_dyn_info,
5380         .llseek         = generic_file_llseek,
5381 };
5382 #endif /* CONFIG_DYNAMIC_FTRACE */
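/*
 * tracing_read_dyn_info() above backs the "dyn_ftrace_total_info" file
 * (created in tracer_init_debugfs() with &ftrace_update_tot_cnt as its
 * data), so reading it reports how many functions have been converted
 * into dynamic ftrace records (the value below is illustrative):
 *
 *	# cat dyn_ftrace_total_info
 *	21023
 */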
5383
5384 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5385 static void
5386 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5387 {
5388         tracing_snapshot();
5389 }
5390
5391 static void
5392 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5393 {
5394                 unsigned long *count = (unsigned long *)data;
5395
5396         if (!*count)
5397                 return;
5398
5399         if (*count != -1)
5400                 (*count)--;
5401
5402         tracing_snapshot();
5403 }
5404
5405 static int
5406 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5407                       struct ftrace_probe_ops *ops, void *data)
5408 {
5409         long count = (long)data;
5410
5411         seq_printf(m, "%ps:", (void *)ip);
5412
5413         seq_puts(m, "snapshot");
5414
5415         if (count == -1)
5416                 seq_puts(m, ":unlimited\n");
5417         else
5418                 seq_printf(m, ":count=%ld\n", count);
5419
5420         return 0;
5421 }
5422
5423 static struct ftrace_probe_ops snapshot_probe_ops = {
5424         .func                   = ftrace_snapshot,
5425         .print                  = ftrace_snapshot_print,
5426 };
5427
5428 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5429         .func                   = ftrace_count_snapshot,
5430         .print                  = ftrace_snapshot_print,
5431 };
5432
5433 static int
5434 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5435                                char *glob, char *cmd, char *param, int enable)
5436 {
5437         struct ftrace_probe_ops *ops;
5438         void *count = (void *)-1;
5439         char *number;
5440         int ret;
5441
5442         /* hash funcs only work with set_ftrace_filter */
5443         if (!enable)
5444                 return -EINVAL;
5445
5446         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5447
5448         if (glob[0] == '!') {
5449                 unregister_ftrace_function_probe_func(glob+1, ops);
5450                 return 0;
5451         }
5452
5453         if (!param)
5454                 goto out_reg;
5455
5456         number = strsep(&param, ":");
5457
5458         if (!strlen(number))
5459                 goto out_reg;
5460
5461         /*
5462          * We use the callback data field (which is a pointer)
5463          * as our counter.
5464          */
5465         ret = kstrtoul(number, 0, (unsigned long *)&count);
5466         if (ret)
5467                 return ret;
5468
5469  out_reg:
5470         ret = register_ftrace_function_probe(glob, ops, count);
5471
5472         if (ret >= 0)
5473                 alloc_snapshot(&global_trace);
5474
5475         return ret < 0 ? ret : 0;
5476 }
5477
5478 static struct ftrace_func_command ftrace_snapshot_cmd = {
5479         .name                   = "snapshot",
5480         .func                   = ftrace_trace_snapshot_callback,
5481 };
5482
5483 static int register_snapshot_cmd(void)
5484 {
5485         return register_ftrace_command(&ftrace_snapshot_cmd);
5486 }
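/*
 * Usage sketch for the "snapshot" function command registered above
 * (schedule() is just an example target):
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *		Take a snapshot every time schedule() is hit (unlimited).
 *	# echo 'schedule:snapshot:3' > set_ftrace_filter
 *		Snapshot only on the first three hits; the count is
 *		parsed by the strsep()/kstrtoul() code above.
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *		A leading '!' unregisters the probe.
 */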
5487 #else
5488 static inline int register_snapshot_cmd(void) { return 0; }
5489 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5490
5491 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5492 {
5493         if (tr->dir)
5494                 return tr->dir;
5495
5496         if (!debugfs_initialized())
5497                 return NULL;
5498
5499         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5500                 tr->dir = debugfs_create_dir("tracing", NULL);
5501
5502         if (!tr->dir)
5503                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5504
5505         return tr->dir;
5506 }
5507
5508 struct dentry *tracing_init_dentry(void)
5509 {
5510         return tracing_init_dentry_tr(&global_trace);
5511 }
5512
5513 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5514 {
5515         struct dentry *d_tracer;
5516
5517         if (tr->percpu_dir)
5518                 return tr->percpu_dir;
5519
5520         d_tracer = tracing_init_dentry_tr(tr);
5521         if (!d_tracer)
5522                 return NULL;
5523
5524         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5525
5526         WARN_ONCE(!tr->percpu_dir,
5527                   "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5528
5529         return tr->percpu_dir;
5530 }
5531
5532 static void
5533 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5534 {
5535         struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
5536         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5537         struct dentry *d_cpu;
5538         char cpu_dir[30]; /* 30 characters should be more than enough */
5539
5540         if (!d_percpu)
5541                 return;
5542
5543         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5544         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5545         if (!d_cpu) {
5546                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5547                 return;
5548         }
5549
5550         /* per cpu trace_pipe */
5551         trace_create_file("trace_pipe", 0444, d_cpu,
5552                         (void *)&data->trace_cpu, &tracing_pipe_fops);
5553
5554         /* per cpu trace */
5555         trace_create_file("trace", 0644, d_cpu,
5556                         (void *)&data->trace_cpu, &tracing_fops);
5557
5558         trace_create_file("trace_pipe_raw", 0444, d_cpu,
5559                         (void *)&data->trace_cpu, &tracing_buffers_fops);
5560
5561         trace_create_file("stats", 0444, d_cpu,
5562                         (void *)&data->trace_cpu, &tracing_stats_fops);
5563
5564         trace_create_file("buffer_size_kb", 0444, d_cpu,
5565                         (void *)&data->trace_cpu, &tracing_entries_fops);
5566
5567 #ifdef CONFIG_TRACER_SNAPSHOT
5568         trace_create_file("snapshot", 0644, d_cpu,
5569                           (void *)&data->trace_cpu, &snapshot_fops);
5570
5571         trace_create_file("snapshot_raw", 0444, d_cpu,
5572                         (void *)&data->trace_cpu, &snapshot_raw_fops);
5573 #endif
5574 }
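/*
 * The function above gives each CPU its own directory; for cpu0 the
 * resulting layout is (the snapshot files exist only when
 * CONFIG_TRACER_SNAPSHOT is enabled):
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot
 *	per_cpu/cpu0/snapshot_raw
 */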
5575
5576 #ifdef CONFIG_FTRACE_SELFTEST
5577 /* Let selftest have access to static functions in this file */
5578 #include "trace_selftest.c"
5579 #endif
5580
5581 struct trace_option_dentry {
5582         struct tracer_opt               *opt;
5583         struct tracer_flags             *flags;
5584         struct trace_array              *tr;
5585         struct dentry                   *entry;
5586 };
5587
5588 static ssize_t
5589 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5590                         loff_t *ppos)
5591 {
5592         struct trace_option_dentry *topt = filp->private_data;
5593         char *buf;
5594
5595         if (topt->flags->val & topt->opt->bit)
5596                 buf = "1\n";
5597         else
5598                 buf = "0\n";
5599
5600         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5601 }
5602
5603 static ssize_t
5604 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5605                          loff_t *ppos)
5606 {
5607         struct trace_option_dentry *topt = filp->private_data;
5608         unsigned long val;
5609         int ret;
5610
5611         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5612         if (ret)
5613                 return ret;
5614
5615         if (val != 0 && val != 1)
5616                 return -EINVAL;
5617
5618         if (!!(topt->flags->val & topt->opt->bit) != val) {
5619                 mutex_lock(&trace_types_lock);
5620                 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
5621                                           topt->opt, !val);
5622                 mutex_unlock(&trace_types_lock);
5623                 if (ret)
5624                         return ret;
5625         }
5626
5627         *ppos += cnt;
5628
5629         return cnt;
5630 }
5631
5633 static const struct file_operations trace_options_fops = {
5634         .open = tracing_open_generic,
5635         .read = trace_options_read,
5636         .write = trace_options_write,
5637         .llseek = generic_file_llseek,
5638 };
5639
5640 static ssize_t
5641 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5642                         loff_t *ppos)
5643 {
5644         long index = (long)filp->private_data;
5645         char *buf;
5646
5647         if (trace_flags & (1 << index))
5648                 buf = "1\n";
5649         else
5650                 buf = "0\n";
5651
5652         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5653 }
5654
5655 static ssize_t
5656 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5657                          loff_t *ppos)
5658 {
5659         struct trace_array *tr = &global_trace;
5660         long index = (long)filp->private_data;
5661         unsigned long val;
5662         int ret;
5663
5664         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5665         if (ret)
5666                 return ret;
5667
5668         if (val != 0 && val != 1)
5669                 return -EINVAL;
5670
5671         mutex_lock(&trace_types_lock);
5672         ret = set_tracer_flag(tr, 1 << index, val);
5673         mutex_unlock(&trace_types_lock);
5674
5675         if (ret < 0)
5676                 return ret;
5677
5678         *ppos += cnt;
5679
5680         return cnt;
5681 }
5682
5683 static const struct file_operations trace_options_core_fops = {
5684         .open = tracing_open_generic,
5685         .read = trace_options_core_read,
5686         .write = trace_options_core_write,
5687         .llseek = generic_file_llseek,
5688 };
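/*
 * The two fops above back the files in the "options" directory:
 * trace_options_fops for tracer-specific flags, trace_options_core_fops
 * for the global trace_flags bits. Usage sketch (sym-addr is one
 * example core option; the available names vary):
 *
 *	# cat options/sym-addr
 *	0
 *	# echo 1 > options/sym-addr
 *
 * Only "0" and "1" are accepted; anything else returns -EINVAL, as
 * enforced in both write handlers.
 */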
5689
5690 struct dentry *trace_create_file(const char *name,
5691                                  umode_t mode,
5692                                  struct dentry *parent,
5693                                  void *data,
5694                                  const struct file_operations *fops)
5695 {
5696         struct dentry *ret;
5697
5698         ret = debugfs_create_file(name, mode, parent, data, fops);
5699         if (!ret)
5700                 pr_warning("Could not create debugfs '%s' entry\n", name);
5701
5702         return ret;
5703 }
5704
5706 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5707 {
5708         struct dentry *d_tracer;
5709
5710         if (tr->options)
5711                 return tr->options;
5712
5713         d_tracer = tracing_init_dentry_tr(tr);
5714         if (!d_tracer)
5715                 return NULL;
5716
5717         tr->options = debugfs_create_dir("options", d_tracer);
5718         if (!tr->options) {
5719                 pr_warning("Could not create debugfs directory 'options'\n");
5720                 return NULL;
5721         }
5722
5723         return tr->options;
5724 }
5725
5726 static void
5727 create_trace_option_file(struct trace_array *tr,
5728                          struct trace_option_dentry *topt,
5729                          struct tracer_flags *flags,
5730                          struct tracer_opt *opt)
5731 {
5732         struct dentry *t_options;
5733
5734         t_options = trace_options_init_dentry(tr);
5735         if (!t_options)
5736                 return;
5737
5738         topt->flags = flags;
5739         topt->opt = opt;
5740         topt->tr = tr;
5741
5742         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5743                                     &trace_options_fops);
5744
5745 }
5746
5747 static struct trace_option_dentry *
5748 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5749 {
5750         struct trace_option_dentry *topts;
5751         struct tracer_flags *flags;
5752         struct tracer_opt *opts;
5753         int cnt;
5754
5755         if (!tracer)
5756                 return NULL;
5757
5758         flags = tracer->flags;
5759
5760         if (!flags || !flags->opts)
5761                 return NULL;
5762
5763         opts = flags->opts;
5764
5765         for (cnt = 0; opts[cnt].name; cnt++)
5766                 ;
5767
5768         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5769         if (!topts)
5770                 return NULL;
5771
5772         for (cnt = 0; opts[cnt].name; cnt++)
5773                 create_trace_option_file(tr, &topts[cnt], flags,
5774                                          &opts[cnt]);
5775
5776         return topts;
5777 }
5778
5779 static void
5780 destroy_trace_option_files(struct trace_option_dentry *topts)
5781 {
5782         int cnt;
5783
5784         if (!topts)
5785                 return;
5786
5787         for (cnt = 0; topts[cnt].opt; cnt++) {
5788                 if (topts[cnt].entry)
5789                         debugfs_remove(topts[cnt].entry);
5790         }
5791
5792         kfree(topts);
5793 }
5794
5795 static struct dentry *
5796 create_trace_option_core_file(struct trace_array *tr,
5797                               const char *option, long index)
5798 {
5799         struct dentry *t_options;
5800
5801         t_options = trace_options_init_dentry(tr);
5802         if (!t_options)
5803                 return NULL;
5804
5805         return trace_create_file(option, 0644, t_options, (void *)index,
5806                                     &trace_options_core_fops);
5807 }
5808
5809 static __init void create_trace_options_dir(struct trace_array *tr)
5810 {
5811         struct dentry *t_options;
5812         int i;
5813
5814         t_options = trace_options_init_dentry(tr);
5815         if (!t_options)
5816                 return;
5817
5818         for (i = 0; trace_options[i]; i++)
5819                 create_trace_option_core_file(tr, trace_options[i], i);
5820 }
5821
5822 static ssize_t
5823 rb_simple_read(struct file *filp, char __user *ubuf,
5824                size_t cnt, loff_t *ppos)
5825 {
5826         struct trace_array *tr = filp->private_data;
5827         char buf[64];
5828         int r;
5829
5830         r = tracer_tracing_is_on(tr);
5831         r = sprintf(buf, "%d\n", r);
5832
5833         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5834 }
5835
5836 static ssize_t
5837 rb_simple_write(struct file *filp, const char __user *ubuf,
5838                 size_t cnt, loff_t *ppos)
5839 {
5840         struct trace_array *tr = filp->private_data;
5841         struct ring_buffer *buffer = tr->trace_buffer.buffer;
5842         unsigned long val;
5843         int ret;
5844
5845         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5846         if (ret)
5847                 return ret;
5848
5849         if (buffer) {
5850                 mutex_lock(&trace_types_lock);
5851                 if (val) {
5852                         tracer_tracing_on(tr);
5853                         if (tr->current_trace->start)
5854                                 tr->current_trace->start(tr);
5855                 } else {
5856                         tracer_tracing_off(tr);
5857                         if (tr->current_trace->stop)
5858                                 tr->current_trace->stop(tr);
5859                 }
5860                 mutex_unlock(&trace_types_lock);
5861         }
5862
5863         (*ppos)++;
5864
5865         return cnt;
5866 }
5867
5868 static const struct file_operations rb_simple_fops = {
5869         .open           = tracing_open_generic_tr,
5870         .read           = rb_simple_read,
5871         .write          = rb_simple_write,
5872         .release        = tracing_release_generic_tr,
5873         .llseek         = default_llseek,
5874 };
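/*
 * rb_simple_fops backs the "tracing_on" file created in
 * init_tracer_debugfs() below. Usage sketch:
 *
 *	# echo 0 > tracing_on	(disable the ring buffer; also calls the
 *				 current tracer's ->stop() if it has one)
 *	# echo 1 > tracing_on	(re-enable it, calling ->start())
 *	# cat tracing_on
 *	1
 */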
5875
5876 struct dentry *trace_instance_dir;
5877
5878 static void
5879 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5880
5881 static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5882 {
5883         int cpu;
5884
5885         for_each_tracing_cpu(cpu) {
5886                 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5887                 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5888                 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5889         }
5890 }
5891
5892 static int
5893 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5894 {
5895         enum ring_buffer_flags rb_flags;
5896
5897         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5898
5899         buf->buffer = ring_buffer_alloc(size, rb_flags);
5900         if (!buf->buffer)
5901                 return -ENOMEM;
5902
5903         buf->data = alloc_percpu(struct trace_array_cpu);
5904         if (!buf->data) {
5905                 ring_buffer_free(buf->buffer);
5906                 return -ENOMEM;
5907         }
5908
5909         init_trace_buffers(tr, buf);
5910
5911         /* Allocate the first page for all per-cpu buffers */
5912         set_buffer_entries(buf,
5913                            ring_buffer_size(buf->buffer, 0));
5914
5915         return 0;
5916 }
5917
5918 static int allocate_trace_buffers(struct trace_array *tr, int size)
5919 {
5920         int ret;
5921
5922         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5923         if (ret)
5924                 return ret;
5925
5926 #ifdef CONFIG_TRACER_MAX_TRACE
5927         ret = allocate_trace_buffer(tr, &tr->max_buffer,
5928                                     allocate_snapshot ? size : 1);
5929         if (WARN_ON(ret)) {
5930                 ring_buffer_free(tr->trace_buffer.buffer);
5931                 free_percpu(tr->trace_buffer.data);
5932                 return -ENOMEM;
5933         }
5934         tr->allocated_snapshot = allocate_snapshot;
5935
5936         /*
5937          * Only the top level trace array gets its snapshot allocated
5938          * from the kernel command line.
5939          */
5940         allocate_snapshot = false;
5941 #endif
5942         return 0;
5943 }
5944
5945 static int new_instance_create(const char *name)
5946 {
5947         struct trace_array *tr;
5948         int ret;
5949
5950         mutex_lock(&trace_types_lock);
5951
5952         ret = -EEXIST;
5953         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5954                 if (tr->name && strcmp(tr->name, name) == 0)
5955                         goto out_unlock;
5956         }
5957
5958         ret = -ENOMEM;
5959         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5960         if (!tr)
5961                 goto out_unlock;
5962
5963         tr->name = kstrdup(name, GFP_KERNEL);
5964         if (!tr->name)
5965                 goto out_free_tr;
5966
5967         raw_spin_lock_init(&tr->start_lock);
5968
5969         tr->current_trace = &nop_trace;
5970
5971         INIT_LIST_HEAD(&tr->systems);
5972         INIT_LIST_HEAD(&tr->events);
5973
5974         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
5975                 goto out_free_tr;
5976
5977         /* Holder for file callbacks */
5978         tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5979         tr->trace_cpu.tr = tr;
5980
5981         tr->dir = debugfs_create_dir(name, trace_instance_dir);
5982         if (!tr->dir)
5983                 goto out_free_tr;
5984
5985         ret = event_trace_add_tracer(tr->dir, tr);
5986         if (ret) {
5987                 debugfs_remove_recursive(tr->dir);
5988                 goto out_free_tr;
5989         }
5990
5991         init_tracer_debugfs(tr, tr->dir);
5992
5993         list_add(&tr->list, &ftrace_trace_arrays);
5994
5995         mutex_unlock(&trace_types_lock);
5996
5997         return 0;
5998
5999  out_free_tr:
6000         if (tr->trace_buffer.buffer)
6001                 ring_buffer_free(tr->trace_buffer.buffer);
6002         kfree(tr->name);
6003         kfree(tr);
6004
6005  out_unlock:
6006         mutex_unlock(&trace_types_lock);
6007
6008         return ret;
6009
6010 }
6011
6012 static int instance_delete(const char *name)
6013 {
6014         struct trace_array *tr;
6015         int found = 0;
6016         int ret;
6017
6018         mutex_lock(&trace_types_lock);
6019
6020         ret = -ENODEV;
6021         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6022                 if (tr->name && strcmp(tr->name, name) == 0) {
6023                         found = 1;
6024                         break;
6025                 }
6026         }
6027         if (!found)
6028                 goto out_unlock;
6029
6030         ret = -EBUSY;
6031         if (tr->ref)
6032                 goto out_unlock;
6033
6034         list_del(&tr->list);
6035
6036         event_trace_del_tracer(tr);
6037         debugfs_remove_recursive(tr->dir);
6038         free_percpu(tr->trace_buffer.data);
6039         ring_buffer_free(tr->trace_buffer.buffer);
6040
6041         kfree(tr->name);
6042         kfree(tr);
6043
6044         ret = 0;
6045
6046  out_unlock:
6047         mutex_unlock(&trace_types_lock);
6048
6049         return ret;
6050 }
6051
6052 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6053 {
6054         struct dentry *parent;
6055         int ret;
6056
6057         /* Paranoid: Make sure the parent is the "instances" directory */
6058         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6059         if (WARN_ON_ONCE(parent != trace_instance_dir))
6060                 return -ENOENT;
6061
6062         /*
6063          * The inode mutex is locked, but debugfs_create_dir() will also
6064          * take the mutex. As the instances directory cannot be destroyed
6065          * or changed in any other way, it is safe to unlock it and
6066          * let the dentry try. If two users try to make the same dir
6067          * at the same time, new_instance_create() will determine the
6068          * winner.
6069          */
6070         mutex_unlock(&inode->i_mutex);
6071
6072         ret = new_instance_create(dentry->d_iname);
6073
6074         mutex_lock(&inode->i_mutex);
6075
6076         return ret;
6077 }
6078
6079 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6080 {
6081         struct dentry *parent;
6082         int ret;
6083
6084         /* Paranoid: Make sure the parent is the "instances" directory */
6085         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6086         if (WARN_ON_ONCE(parent != trace_instance_dir))
6087                 return -ENOENT;
6088
6089         /* The caller did a dget() on dentry */
6090         mutex_unlock(&dentry->d_inode->i_mutex);
6091
6092         /*
6093          * The inode mutex is locked, but debugfs_remove_recursive() will
6094          * also take the mutex. As the instances directory cannot be
6095          * destroyed or changed in any other way, it is safe to unlock
6096          * it, and let the dentry try. If two users try to remove the
6097          * same dir at the same time, instance_delete() will determine
6098          * the winner.
6099          */
6100         mutex_unlock(&inode->i_mutex);
6101
6102         ret = instance_delete(dentry->d_iname);
6103
6104         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6105         mutex_lock(&dentry->d_inode->i_mutex);
6106
6107         return ret;
6108 }
6109
6110 static const struct inode_operations instance_dir_inode_operations = {
6111         .lookup         = simple_lookup,
6112         .mkdir          = instance_mkdir,
6113         .rmdir          = instance_rmdir,
6114 };
6115
6116 static __init void create_trace_instances(struct dentry *d_tracer)
6117 {
6118         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6119         if (WARN_ON(!trace_instance_dir))
6120                 return;
6121
6122         /* Hijack the dir inode operations to allow mkdir and rmdir */
6123         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6124 }
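/*
 * With mkdir/rmdir hijacked above, additional trace arrays can be
 * managed entirely from user space. Usage sketch:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *		Creates a new trace_array via new_instance_create(), with
 *		its own buffers, events and per-cpu files.
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *		Tears it down via instance_delete(); fails with -EBUSY
 *		while the instance still has references.
 */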
6125
6126 static void
6127 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6128 {
6129         int cpu;
6130
6131         trace_create_file("trace_options", 0644, d_tracer,
6132                           tr, &tracing_iter_fops);
6133
6134         trace_create_file("trace", 0644, d_tracer,
6135                         (void *)&tr->trace_cpu, &tracing_fops);
6136
6137         trace_create_file("trace_pipe", 0444, d_tracer,
6138                         (void *)&tr->trace_cpu, &tracing_pipe_fops);
6139
6140         trace_create_file("buffer_size_kb", 0644, d_tracer,
6141                         (void *)&tr->trace_cpu, &tracing_entries_fops);
6142
6143         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6144                           tr, &tracing_total_entries_fops);
6145
6146         trace_create_file("free_buffer", 0200, d_tracer,
6147                           tr, &tracing_free_buffer_fops);
6148
6149         trace_create_file("trace_marker", 0220, d_tracer,
6150                           tr, &tracing_mark_fops);
6151
6152         trace_create_file("trace_clock", 0644, d_tracer, tr,
6153                           &trace_clock_fops);
6154
6155         trace_create_file("tracing_on", 0644, d_tracer,
6156                             tr, &rb_simple_fops);
6157
6158 #ifdef CONFIG_TRACER_SNAPSHOT
6159         trace_create_file("snapshot", 0644, d_tracer,
6160                           (void *)&tr->trace_cpu, &snapshot_fops);
6161 #endif
6162
6163         for_each_tracing_cpu(cpu)
6164                 tracing_init_debugfs_percpu(tr, cpu);
6165
6166 }
6167
6168 static __init int tracer_init_debugfs(void)
6169 {
6170         struct dentry *d_tracer;
6171
6172         trace_access_lock_init();
6173
6174         d_tracer = tracing_init_dentry();
6175         if (!d_tracer)
6176                 return 0;
6177
6178         init_tracer_debugfs(&global_trace, d_tracer);
6179
6180         trace_create_file("tracing_cpumask", 0644, d_tracer,
6181                         &global_trace, &tracing_cpumask_fops);
6182
6183         trace_create_file("available_tracers", 0444, d_tracer,
6184                         &global_trace, &show_traces_fops);
6185
6186         trace_create_file("current_tracer", 0644, d_tracer,
6187                         &global_trace, &set_tracer_fops);
6188
6189 #ifdef CONFIG_TRACER_MAX_TRACE
6190         trace_create_file("tracing_max_latency", 0644, d_tracer,
6191                         &tracing_max_latency, &tracing_max_lat_fops);
6192 #endif
6193
6194         trace_create_file("tracing_thresh", 0644, d_tracer,
6195                         &tracing_thresh, &tracing_max_lat_fops);
6196
6197         trace_create_file("README", 0444, d_tracer,
6198                         NULL, &tracing_readme_fops);
6199
6200         trace_create_file("saved_cmdlines", 0444, d_tracer,
6201                         NULL, &tracing_saved_cmdlines_fops);
6202
6203 #ifdef CONFIG_DYNAMIC_FTRACE
6204         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6205                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6206 #endif
6207
6208         create_trace_instances(d_tracer);
6209
6210         create_trace_options_dir(&global_trace);
6211
6212         return 0;
6213 }
6214
6215 static int trace_panic_handler(struct notifier_block *this,
6216                                unsigned long event, void *unused)
6217 {
6218         if (ftrace_dump_on_oops)
6219                 ftrace_dump(ftrace_dump_on_oops);
6220         return NOTIFY_OK;
6221 }
6222
6223 static struct notifier_block trace_panic_notifier = {
6224         .notifier_call  = trace_panic_handler,
6225         .next           = NULL,
6226         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6227 };
6228
6229 static int trace_die_handler(struct notifier_block *self,
6230                              unsigned long val,
6231                              void *data)
6232 {
6233         switch (val) {
6234         case DIE_OOPS:
6235                 if (ftrace_dump_on_oops)
6236                         ftrace_dump(ftrace_dump_on_oops);
6237                 break;
6238         default:
6239                 break;
6240         }
6241         return NOTIFY_OK;
6242 }
6243
6244 static struct notifier_block trace_die_notifier = {
6245         .notifier_call = trace_die_handler,
6246         .priority = 200
6247 };
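/*
 * The two notifiers above are what make ftrace_dump_on_oops fire.
 * It can be enabled with the boot parameter or the sysctl (see
 * Documentation/kernel-parameters.txt):
 *
 *	ftrace_dump_on_oops		dump all CPU buffers on an oops
 *	ftrace_dump_on_oops=orig_cpu	dump only the oopsing CPU's buffer
 *
 *	# echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */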
6248
6249 /*
6250  * printk is limited to a max of 1024 bytes; we really don't need
6251  * it that big. Nothing should be printing 1000 characters anyway.
6252  */
6253 #define TRACE_MAX_PRINT         1000
6254
6255 /*
6256  * Define KERN_TRACE here so that we have one place to modify
6257  * it if we decide to change the log level that the ftrace dump
6258  * should print at.
6259  */
6260 #define KERN_TRACE              KERN_EMERG
6261
6262 void
6263 trace_printk_seq(struct trace_seq *s)
6264 {
6265         /* Probably should print a warning here. */
6266         if (s->len >= TRACE_MAX_PRINT)
6267                 s->len = TRACE_MAX_PRINT;
6268
6269         /* Should be NUL-terminated, but we are paranoid. */
6270         s->buffer[s->len] = 0;
6271
6272         printk(KERN_TRACE "%s", s->buffer);
6273
6274         trace_seq_init(s);
6275 }
6276
6277 void trace_init_global_iter(struct trace_iterator *iter)
6278 {
6279         iter->tr = &global_trace;
6280         iter->trace = iter->tr->current_trace;
6281         iter->cpu_file = RING_BUFFER_ALL_CPUS;
6282         iter->trace_buffer = &global_trace.trace_buffer;
6283 }
6284
6285 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6286 {
6287         /* use static because iter can be a bit big for the stack */
6288         static struct trace_iterator iter;
6289         static atomic_t dump_running;
6290         unsigned int old_userobj;
6291         unsigned long flags;
6292         int cnt = 0, cpu;
6293
6294         /* Only allow one dump user at a time. */
6295         if (atomic_inc_return(&dump_running) != 1) {
6296                 atomic_dec(&dump_running);
6297                 return;
6298         }
6299
6300         /*
6301          * Always turn off tracing when we dump.
6302          * We don't need to show trace output of what happens
6303          * between multiple crashes.
6304          *
6305          * If the user does a sysrq-z, then they can re-enable
6306          * tracing with echo 1 > tracing_on.
6307          */
6308         tracing_off();
6309
6310         local_irq_save(flags);
6311
6312         /* Simulate the iterator */
6313         trace_init_global_iter(&iter);
6314
6315         for_each_tracing_cpu(cpu) {
6316                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6317         }
6318
6319         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6320
6321         /* don't look at user memory in panic mode */
6322         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6323
6324         switch (oops_dump_mode) {
6325         case DUMP_ALL:
6326                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6327                 break;
6328         case DUMP_ORIG:
6329                 iter.cpu_file = raw_smp_processor_id();
6330                 break;
6331         case DUMP_NONE:
6332                 goto out_enable;
6333         default:
6334                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6335                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6336         }
6337
6338         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6339
6340         /* Did function tracer already get disabled? */
6341         if (ftrace_is_dead()) {
6342                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6343                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6344         }
6345
6346         /*
6347          * We need to stop all tracing on all CPUs to read
6348          * the next buffer. This is a bit expensive, but it is
6349          * not done often. We fill in all that we can read,
6350          * and then release the locks again.
6351          */
6352
6353         while (!trace_empty(&iter)) {
6354
6355                 if (!cnt)
6356                         printk(KERN_TRACE "---------------------------------\n");
6357
6358                 cnt++;
6359
6360                 /* reset all but tr, trace, and overruns */
6361                 memset(&iter.seq, 0,
6362                        sizeof(struct trace_iterator) -
6363                        offsetof(struct trace_iterator, seq));
6364                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6365                 iter.pos = -1;
6366
6367                 if (trace_find_next_entry_inc(&iter) != NULL) {
6368                         int ret;
6369
6370                         ret = print_trace_line(&iter);
6371                         if (ret != TRACE_TYPE_NO_CONSUME)
6372                                 trace_consume(&iter);
6373                 }
6374                 touch_nmi_watchdog();
6375
6376                 trace_printk_seq(&iter.seq);
6377         }
6378
6379         if (!cnt)
6380                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6381         else
6382                 printk(KERN_TRACE "---------------------------------\n");
6383
6384  out_enable:
6385         trace_flags |= old_userobj;
6386
6387         for_each_tracing_cpu(cpu) {
6388                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6389         }
6390         atomic_dec(&dump_running);
6391         local_irq_restore(flags);
6392 }
6393 EXPORT_SYMBOL_GPL(ftrace_dump);
6394
6395 __init static int tracer_alloc_buffers(void)
6396 {
6397         int ring_buf_size;
6398         int ret = -ENOMEM;
6399
6401         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6402                 goto out;
6403
6404         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
6405                 goto out_free_buffer_mask;
6406
6407         /* Only allocate trace_printk buffers if a trace_printk exists */
6408         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6409                 /* Must be called before global_trace.buffer is allocated */
6410                 trace_printk_init_buffers();
6411
6412         /* To save memory, keep the ring buffer size to its minimum */
6413         if (ring_buffer_expanded)
6414                 ring_buf_size = trace_buf_size;
6415         else
6416                 ring_buf_size = 1;
6417
6418         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6419         cpumask_copy(tracing_cpumask, cpu_all_mask);
6420
6421         raw_spin_lock_init(&global_trace.start_lock);
6422
6423         /* TODO: make the number of buffers hot pluggable with CPUs */
6424         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6425                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6426                 WARN_ON(1);
6427                 goto out_free_cpumask;
6428         }
6429
6430         if (global_trace.buffer_disabled)
6431                 tracing_off();
6432
6433         trace_init_cmdlines();
6434
6435         /*
6436          * register_tracer() might reference current_trace, so it
6437          * needs to be set before we register anything. This is
6438          * just a bootstrap of current_trace anyway.
6439          */
6440         global_trace.current_trace = &nop_trace;
6441
6442         register_tracer(&nop_trace);
6443
6444         /* All seems OK, enable tracing */
6445         tracing_disabled = 0;
6446
6447         atomic_notifier_chain_register(&panic_notifier_list,
6448                                        &trace_panic_notifier);
6449
6450         register_die_notifier(&trace_die_notifier);
6451
6452         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6453
6454         /* Holder for file callbacks */
6455         global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6456         global_trace.trace_cpu.tr = &global_trace;
6457
6458         INIT_LIST_HEAD(&global_trace.systems);
6459         INIT_LIST_HEAD(&global_trace.events);
6460         list_add(&global_trace.list, &ftrace_trace_arrays);
6461
6462         while (trace_boot_options) {
6463                 char *option;
6464
6465                 option = strsep(&trace_boot_options, ",");
6466                 trace_set_options(&global_trace, option);
6467         }
6468
6469         register_snapshot_cmd();
6470
6471         return 0;
6472
6473 out_free_cpumask:
6474         free_percpu(global_trace.trace_buffer.data);
6475 #ifdef CONFIG_TRACER_MAX_TRACE
6476         free_percpu(global_trace.max_buffer.data);
6477 #endif
6478         free_cpumask_var(tracing_cpumask);
6479 out_free_buffer_mask:
6480         free_cpumask_var(tracing_buffer_mask);
6481 out:
6482         return ret;
6483 }
6484
6485 __init static int clear_boot_tracer(void)
6486 {
6487         /*
6488          * The default bootup tracer name lives in an init section.
6489          * This function is called at late_initcall time; if the boot
6490          * tracer was never registered, clear the pointer out to prevent
6491          * later registrations from accessing the buffer that is
6492          * about to be freed.
6493          */
6494         if (!default_bootup_tracer)
6495                 return 0;
6496
6497         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6498                default_bootup_tracer);
6499         default_bootup_tracer = NULL;
6500
6501         return 0;
6502 }
6503
6504 early_initcall(tracer_alloc_buffers);
6505 fs_initcall(tracer_init_debugfs);
6506 late_initcall(clear_boot_tracer);