1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond)                    \
44         ({                                      \
45                 int ___r = cond;                \
46                 if (WARN_ON(___r))              \
47                         ftrace_kill();          \
48                 ___r;                           \
49         })
50
51 #define FTRACE_WARN_ON_ONCE(cond)               \
52         ({                                      \
53                 int ___r = cond;                \
54                 if (WARN_ON_ONCE(___r))         \
55                         ftrace_kill();          \
56                 ___r;                           \
57         })
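/*
 * Typical use wraps a sanity check so the caller can warn, shut ftrace
 * down via ftrace_kill(), and bail out in one expression, e.g. (from
 * __register_ftrace_function() below):
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 */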
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
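/*
 * With the values above: 128 buckets for the function-probe hash, 1024
 * buckets for a default filter hash, and a cap of 4096 buckets no
 * matter how many entries a hash holds (see ftrace_hash_move()).
 */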
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname)        \
69         .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
73
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75         .func           = ftrace_stub,
76         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
77 };
78
79 /* ftrace_enabled is the switch that turns ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
82
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
85
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
88
89 /* List for set_ftrace_pid's pids. */
90 LIST_HEAD(ftrace_pids);
91 struct ftrace_pid {
92         struct list_head list;
93         struct pid *pid;
94 };
95
96 /*
97  * ftrace_disabled is set when an anomaly is discovered.
98  * ftrace_disabled is much stronger than ftrace_enabled.
99  */
100 static int ftrace_disabled __read_mostly;
101
102 static DEFINE_MUTEX(ftrace_lock);
103
104 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
105 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
106 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
107 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
108 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
109 static struct ftrace_ops global_ops;
110 static struct ftrace_ops control_ops;
111
112 #if ARCH_SUPPORTS_FTRACE_OPS
113 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
114                                  struct ftrace_ops *op, struct pt_regs *regs);
115 #else
116 /* See comment below, where ftrace_ops_list_func is defined */
117 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
118 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
119 #endif
120
121 /*
122  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
123  * can use rcu_dereference_raw_notrace() is that elements removed from this list
124  * are simply leaked, so there is no need to interact with a grace-period
125  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
126  * concurrent insertions into the ftrace_global_list.
127  *
128  * Silly Alpha and silly pointer-speculation compiler optimizations!
129  */
130 #define do_for_each_ftrace_op(op, list)                 \
131         op = rcu_dereference_raw_notrace(list);                 \
132         do
133
134 /*
135  * Optimized for just a single item in the list (as that is the normal case).
136  */
137 #define while_for_each_ftrace_op(op)                            \
138         while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
139                unlikely((op) != &ftrace_list_end))
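/*
 * The two macros are used as a pair, e.g. (from
 * ftrace_global_list_func() below):
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */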
140
141 static inline void ftrace_ops_init(struct ftrace_ops *ops)
142 {
143 #ifdef CONFIG_DYNAMIC_FTRACE
144         if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
145                 mutex_init(&ops->regex_lock);
146                 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
147         }
148 #endif
149 }
150
151 /**
152  * ftrace_nr_registered_ops - return number of ops registered
153  *
154  * Returns the number of ftrace_ops registered and tracing functions
155  */
156 int ftrace_nr_registered_ops(void)
157 {
158         struct ftrace_ops *ops;
159         int cnt = 0;
160
161         mutex_lock(&ftrace_lock);
162
163         for (ops = ftrace_ops_list;
164              ops != &ftrace_list_end; ops = ops->next)
165                 cnt++;
166
167         mutex_unlock(&ftrace_lock);
168
169         return cnt;
170 }
171
172 static void
173 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
174                         struct ftrace_ops *op, struct pt_regs *regs)
175 {
176         int bit;
177
178         bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
179         if (bit < 0)
180                 return;
181
182         do_for_each_ftrace_op(op, ftrace_global_list) {
183                 op->func(ip, parent_ip, op, regs);
184         } while_for_each_ftrace_op(op);
185
186         trace_clear_recursion(bit);
187 }
188
189 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
190                             struct ftrace_ops *op, struct pt_regs *regs)
191 {
192         if (!test_tsk_trace_trace(current))
193                 return;
194
195         ftrace_pid_function(ip, parent_ip, op, regs);
196 }
197
198 static void set_ftrace_pid_function(ftrace_func_t func)
199 {
200         /* do not set ftrace_pid_function to itself! */
201         if (func != ftrace_pid_func)
202                 ftrace_pid_function = func;
203 }
204
205 /**
206  * clear_ftrace_function - reset the ftrace function
207  *
208  * This NULLs the ftrace function and in essence stops
209  * tracing.  There may be lag before callers actually stop being traced.
210  */
211 void clear_ftrace_function(void)
212 {
213         ftrace_trace_function = ftrace_stub;
214         ftrace_pid_function = ftrace_stub;
215 }
216
217 static void control_ops_disable_all(struct ftrace_ops *ops)
218 {
219         int cpu;
220
221         for_each_possible_cpu(cpu)
222                 *per_cpu_ptr(ops->disabled, cpu) = 1;
223 }
224
225 static int control_ops_alloc(struct ftrace_ops *ops)
226 {
227         int __percpu *disabled;
228
229         disabled = alloc_percpu(int);
230         if (!disabled)
231                 return -ENOMEM;
232
233         ops->disabled = disabled;
234         control_ops_disable_all(ops);
235         return 0;
236 }
237
238 static void control_ops_free(struct ftrace_ops *ops)
239 {
240         free_percpu(ops->disabled);
241 }
242
243 static void update_global_ops(void)
244 {
245         ftrace_func_t func;
246
247         /*
248          * If there's only one function registered, then call that
249          * function directly. Otherwise, we need to iterate over the
250          * registered callers.
251          */
252         if (ftrace_global_list == &ftrace_list_end ||
253             ftrace_global_list->next == &ftrace_list_end) {
254                 func = ftrace_global_list->func;
255                 /*
256                  * As we are calling the function directly, if it does
257                  * not have recursion protection,
258                  * the function_trace_op needs to be updated
259                  * accordingly.
260                  */
261                 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
262                         global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
263                 else
264                         global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265         } else {
266                 func = ftrace_global_list_func;
267                 /* The list has its own recursion protection. */
268                 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
269         }
270
271
272         /* If we filter on pids, update to use the pid function */
273         if (!list_empty(&ftrace_pids)) {
274                 set_ftrace_pid_function(func);
275                 func = ftrace_pid_func;
276         }
277
278         global_ops.func = func;
279 }
280
281 static void update_ftrace_function(void)
282 {
283         ftrace_func_t func;
284
285         update_global_ops();
286
287         /*
288          * If there is at most one ops registered and it is recursion
289          * safe and not dynamic and the arch supports passing ops,
290          * then have the mcount trampoline call the function directly.
291          */
292         if (ftrace_ops_list == &ftrace_list_end ||
293             (ftrace_ops_list->next == &ftrace_list_end &&
294              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
295              (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
296              !FTRACE_FORCE_LIST_FUNC)) {
297                 /* Set the ftrace_ops that the arch callback uses */
298                 if (ftrace_ops_list == &global_ops)
299                         function_trace_op = ftrace_global_list;
300                 else
301                         function_trace_op = ftrace_ops_list;
302                 func = ftrace_ops_list->func;
303         } else {
304                 /* Just use the default ftrace_ops */
305                 function_trace_op = &ftrace_list_end;
306                 func = ftrace_ops_list_func;
307         }
308
309         ftrace_trace_function = func;
310 }
311
312 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
313 {
314         ops->next = *list;
315         /*
316          * We are entering ops into the list but another
317          * CPU might be walking that list. We need to make sure
318          * the ops->next pointer is valid before another CPU sees
319          * the ops pointer included into the list.
320          */
321         rcu_assign_pointer(*list, ops);
322 }
323
324 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
325 {
326         struct ftrace_ops **p;
327
328         /*
329          * If we are removing the last function, then simply point
330          * to the ftrace_stub.
331          */
332         if (*list == ops && ops->next == &ftrace_list_end) {
333                 *list = &ftrace_list_end;
334                 return 0;
335         }
336
337         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
338                 if (*p == ops)
339                         break;
340
341         if (*p != ops)
342                 return -1;
343
344         *p = (*p)->next;
345         return 0;
346 }
347
348 static void add_ftrace_list_ops(struct ftrace_ops **list,
349                                 struct ftrace_ops *main_ops,
350                                 struct ftrace_ops *ops)
351 {
352         int first = *list == &ftrace_list_end;
353         add_ftrace_ops(list, ops);
354         if (first)
355                 add_ftrace_ops(&ftrace_ops_list, main_ops);
356 }
357
358 static int remove_ftrace_list_ops(struct ftrace_ops **list,
359                                   struct ftrace_ops *main_ops,
360                                   struct ftrace_ops *ops)
361 {
362         int ret = remove_ftrace_ops(list, ops);
363         if (!ret && *list == &ftrace_list_end)
364                 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
365         return ret;
366 }
367
368 static int __register_ftrace_function(struct ftrace_ops *ops)
369 {
370         if (unlikely(ftrace_disabled))
371                 return -ENODEV;
372
373         if (FTRACE_WARN_ON(ops == &global_ops))
374                 return -EINVAL;
375
376         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
377                 return -EBUSY;
378
379         /* We don't support both control and global flags set. */
380         if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
381                 return -EINVAL;
382
383 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
384         /*
385          * If the ftrace_ops specifies SAVE_REGS, then it only can be used
386          * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
387          * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
388          */
389         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
390             !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
391                 return -EINVAL;
392
393         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
394                 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
395 #endif
396
397         if (!core_kernel_data((unsigned long)ops))
398                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
399
400         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
401                 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
402                 ops->flags |= FTRACE_OPS_FL_ENABLED;
403         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
404                 if (control_ops_alloc(ops))
405                         return -ENOMEM;
406                 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
407         } else
408                 add_ftrace_ops(&ftrace_ops_list, ops);
409
410         if (ftrace_enabled)
411                 update_ftrace_function();
412
413         return 0;
414 }
415
416 static void ftrace_sync(struct work_struct *work)
417 {
418         /*
419          * This function is just a stub to implement a hard force
420          * of synchronize_sched(). This requires synchronizing
421          * tasks even in userspace and idle.
422          *
423          * Yes, function tracing is rude.
424          */
425 }
426
427 static int __unregister_ftrace_function(struct ftrace_ops *ops)
428 {
429         int ret;
430
431         if (ftrace_disabled)
432                 return -ENODEV;
433
434         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
435                 return -EBUSY;
436
437         if (FTRACE_WARN_ON(ops == &global_ops))
438                 return -EINVAL;
439
440         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
441                 ret = remove_ftrace_list_ops(&ftrace_global_list,
442                                              &global_ops, ops);
443                 if (!ret)
444                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
445         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
446                 ret = remove_ftrace_list_ops(&ftrace_control_list,
447                                              &control_ops, ops);
448                 if (!ret) {
449                         /*
450                          * The ftrace_ops is now removed from the list,
451                          * so there'll be no new users. We must ensure
452                          * all current users are done before we free
453                          * the control data.
454                          * Note synchronize_sched() is not enough, as we
455                          * use preempt_disable() to do RCU, but the function
456                          * tracer can be called where RCU is not active
457                          * (before user_exit()).
458                          */
459                         schedule_on_each_cpu(ftrace_sync);
460                         control_ops_free(ops);
461                 }
462         } else
463                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
464
465         if (ret < 0)
466                 return ret;
467
468         if (ftrace_enabled)
469                 update_ftrace_function();
470
471         /*
472          * Dynamic ops may be freed, we must make sure that all
473          * callers are done before leaving this function.
474          *
475          * Again, normal synchronize_sched() is not good enough.
476          * We need to do a hard force of sched synchronization.
477          */
478         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
479                 schedule_on_each_cpu(ftrace_sync);
480
481
482         return 0;
483 }
484
485 static void ftrace_update_pid_func(void)
486 {
487         /* Only do something if we are tracing something */
488         if (ftrace_trace_function == ftrace_stub)
489                 return;
490
491         update_ftrace_function();
492 }
493
494 #ifdef CONFIG_FUNCTION_PROFILER
495 struct ftrace_profile {
496         struct hlist_node               node;
497         unsigned long                   ip;
498         unsigned long                   counter;
499 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
500         unsigned long long              time;
501         unsigned long long              time_squared;
502 #endif
503 };
504
505 struct ftrace_profile_page {
506         struct ftrace_profile_page      *next;
507         unsigned long                   index;
508         struct ftrace_profile           records[];
509 };
510
511 struct ftrace_profile_stat {
512         atomic_t                        disabled;
513         struct hlist_head               *hash;
514         struct ftrace_profile_page      *pages;
515         struct ftrace_profile_page      *start;
516         struct tracer_stat              stat;
517 };
518
519 #define PROFILE_RECORDS_SIZE                                            \
520         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
521
522 #define PROFILES_PER_PAGE                                       \
523         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
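/*
 * Rough sizing, assuming 4K pages and 64-bit pointers: the page header
 * (next + index) is 16 bytes and each record 48 bytes (with the graph
 * tracer fields), so PROFILES_PER_PAGE works out to about 85 records.
 */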
524
525 static int ftrace_profile_enabled __read_mostly;
526
527 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
528 static DEFINE_MUTEX(ftrace_profile_lock);
529
530 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
531
532 #define FTRACE_PROFILE_HASH_BITS 10
533 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
534
535 static void *
536 function_stat_next(void *v, int idx)
537 {
538         struct ftrace_profile *rec = v;
539         struct ftrace_profile_page *pg;
540
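        /*
         * Profile pages come from get_zeroed_page() and are page
         * aligned, so masking a record's address with PAGE_MASK
         * recovers the ftrace_profile_page that contains it.
         */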
541         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
542
543  again:
544         if (idx != 0)
545                 rec++;
546
547         if ((void *)rec >= (void *)&pg->records[pg->index]) {
548                 pg = pg->next;
549                 if (!pg)
550                         return NULL;
551                 rec = &pg->records[0];
552                 if (!rec->counter)
553                         goto again;
554         }
555
556         return rec;
557 }
558
559 static void *function_stat_start(struct tracer_stat *trace)
560 {
561         struct ftrace_profile_stat *stat =
562                 container_of(trace, struct ftrace_profile_stat, stat);
563
564         if (!stat || !stat->start)
565                 return NULL;
566
567         return function_stat_next(&stat->start->records[0], 0);
568 }
569
570 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
571 /* function graph compares on total time */
572 static int function_stat_cmp(void *p1, void *p2)
573 {
574         struct ftrace_profile *a = p1;
575         struct ftrace_profile *b = p2;
576
577         if (a->time < b->time)
578                 return -1;
579         if (a->time > b->time)
580                 return 1;
581         else
582                 return 0;
583 }
584 #else
585 /* not function graph compares against hits */
586 static int function_stat_cmp(void *p1, void *p2)
587 {
588         struct ftrace_profile *a = p1;
589         struct ftrace_profile *b = p2;
590
591         if (a->counter < b->counter)
592                 return -1;
593         if (a->counter > b->counter)
594                 return 1;
595         else
596                 return 0;
597 }
598 #endif
599
600 static int function_stat_headers(struct seq_file *m)
601 {
602 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
603         seq_printf(m, "  Function                               "
604                    "Hit    Time            Avg             s^2\n"
605                       "  --------                               "
606                    "---    ----            ---             ---\n");
607 #else
608         seq_printf(m, "  Function                               Hit\n"
609                       "  --------                               ---\n");
610 #endif
611         return 0;
612 }
613
614 static int function_stat_show(struct seq_file *m, void *v)
615 {
616         struct ftrace_profile *rec = v;
617         char str[KSYM_SYMBOL_LEN];
618         int ret = 0;
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
620         static struct trace_seq s;
621         unsigned long long avg;
622         unsigned long long stddev;
623 #endif
624         mutex_lock(&ftrace_profile_lock);
625
626         /* we raced with function_profile_reset() */
627         if (unlikely(rec->counter == 0)) {
628                 ret = -EBUSY;
629                 goto out;
630         }
631
632         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
633         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
634
635 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
636         seq_printf(m, "    ");
637         avg = rec->time;
638         do_div(avg, rec->counter);
639
640         /* Sample variance (s^2) */
641         if (rec->counter <= 1)
642                 stddev = 0;
643         else {
644                 /*
645                  * Apply the standard sample-variance formula:
646                  * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
647                  */
648                 stddev = rec->counter * rec->time_squared -
649                          rec->time * rec->time;
650
651                 /*
652                  * Divide by only 1000 for the ns^2 -> us^2 conversion;
653                  * trace_print_graph_duration() will divide by 1000 again.
654                  */
655                 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
656         }
657
658         trace_seq_init(&s);
659         trace_print_graph_duration(rec->time, &s);
660         trace_seq_puts(&s, "    ");
661         trace_print_graph_duration(avg, &s);
662         trace_seq_puts(&s, "    ");
663         trace_print_graph_duration(stddev, &s);
664         trace_print_seq(m, &s);
665 #endif
666         seq_putc(m, '\n');
667 out:
668         mutex_unlock(&ftrace_profile_lock);
669
670         return ret;
671 }
672
673 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
674 {
675         struct ftrace_profile_page *pg;
676
677         pg = stat->pages = stat->start;
678
679         while (pg) {
680                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
681                 pg->index = 0;
682                 pg = pg->next;
683         }
684
685         memset(stat->hash, 0,
686                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
687 }
688
689 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
690 {
691         struct ftrace_profile_page *pg;
692         int functions;
693         int pages;
694         int i;
695
696         /* If we already allocated, do nothing */
697         if (stat->pages)
698                 return 0;
699
700         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
701         if (!stat->pages)
702                 return -ENOMEM;
703
704 #ifdef CONFIG_DYNAMIC_FTRACE
705         functions = ftrace_update_tot_cnt;
706 #else
707         /*
708          * We do not know the number of functions that exist because
709          * dynamic tracing is what counts them. With past experience
710          * we have around 20K functions. That should be more than enough.
711          * It is highly unlikely we will execute every function in
712          * the kernel.
713          */
714         functions = 20000;
715 #endif
716
717         pg = stat->start = stat->pages;
718
719         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
720
721         for (i = 1; i < pages; i++) {
722                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
723                 if (!pg->next)
724                         goto out_free;
725                 pg = pg->next;
726         }
727
728         return 0;
729
730  out_free:
731         pg = stat->start;
732         while (pg) {
733                 unsigned long tmp = (unsigned long)pg;
734
735                 pg = pg->next;
736                 free_page(tmp);
737         }
738
739         stat->pages = NULL;
740         stat->start = NULL;
741
742         return -ENOMEM;
743 }
744
745 static int ftrace_profile_init_cpu(int cpu)
746 {
747         struct ftrace_profile_stat *stat;
748         int size;
749
750         stat = &per_cpu(ftrace_profile_stats, cpu);
751
752         if (stat->hash) {
753                 /* If the profile is already created, simply reset it */
754                 ftrace_profile_reset(stat);
755                 return 0;
756         }
757
758         /*
759          * We are profiling all functions, but usually only a few thousand
760          * functions are hit. We'll make a hash of 1024 items.
761          */
762         size = FTRACE_PROFILE_HASH_SIZE;
763
764         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
765
766         if (!stat->hash)
767                 return -ENOMEM;
768
769         /* Preallocate the function profiling pages */
770         if (ftrace_profile_pages_init(stat) < 0) {
771                 kfree(stat->hash);
772                 stat->hash = NULL;
773                 return -ENOMEM;
774         }
775
776         return 0;
777 }
778
779 static int ftrace_profile_init(void)
780 {
781         int cpu;
782         int ret = 0;
783
784         for_each_online_cpu(cpu) {
785                 ret = ftrace_profile_init_cpu(cpu);
786                 if (ret)
787                         break;
788         }
789
790         return ret;
791 }
792
793 /* interrupts must be disabled */
794 static struct ftrace_profile *
795 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
796 {
797         struct ftrace_profile *rec;
798         struct hlist_head *hhd;
799         unsigned long key;
800
801         key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
802         hhd = &stat->hash[key];
803
804         if (hlist_empty(hhd))
805                 return NULL;
806
807         hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
808                 if (rec->ip == ip)
809                         return rec;
810         }
811
812         return NULL;
813 }
814
815 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
816                                struct ftrace_profile *rec)
817 {
818         unsigned long key;
819
820         key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
821         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
822 }
823
824 /*
825  * The memory is already allocated; this simply finds a new record to use.
826  */
827 static struct ftrace_profile *
828 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
829 {
830         struct ftrace_profile *rec = NULL;
831
832         /* prevent recursion (from NMIs) */
833         if (atomic_inc_return(&stat->disabled) != 1)
834                 goto out;
835
836         /*
837          * Try to find the function again since an NMI
838          * could have added it
839          */
840         rec = ftrace_find_profiled_func(stat, ip);
841         if (rec)
842                 goto out;
843
844         if (stat->pages->index == PROFILES_PER_PAGE) {
845                 if (!stat->pages->next)
846                         goto out;
847                 stat->pages = stat->pages->next;
848         }
849
850         rec = &stat->pages->records[stat->pages->index++];
851         rec->ip = ip;
852         ftrace_add_profile(stat, rec);
853
854  out:
855         atomic_dec(&stat->disabled);
856
857         return rec;
858 }
859
860 static void
861 function_profile_call(unsigned long ip, unsigned long parent_ip,
862                       struct ftrace_ops *ops, struct pt_regs *regs)
863 {
864         struct ftrace_profile_stat *stat;
865         struct ftrace_profile *rec;
866         unsigned long flags;
867
868         if (!ftrace_profile_enabled)
869                 return;
870
871         local_irq_save(flags);
872
873         stat = &__get_cpu_var(ftrace_profile_stats);
874         if (!stat->hash || !ftrace_profile_enabled)
875                 goto out;
876
877         rec = ftrace_find_profiled_func(stat, ip);
878         if (!rec) {
879                 rec = ftrace_profile_alloc(stat, ip);
880                 if (!rec)
881                         goto out;
882         }
883
884         rec->counter++;
885  out:
886         local_irq_restore(flags);
887 }
888
889 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
890 static int profile_graph_entry(struct ftrace_graph_ent *trace)
891 {
892         function_profile_call(trace->func, 0, NULL, NULL);
893         return 1;
894 }
895
896 static void profile_graph_return(struct ftrace_graph_ret *trace)
897 {
898         struct ftrace_profile_stat *stat;
899         unsigned long long calltime;
900         struct ftrace_profile *rec;
901         unsigned long flags;
902
903         local_irq_save(flags);
904         stat = &__get_cpu_var(ftrace_profile_stats);
905         if (!stat->hash || !ftrace_profile_enabled)
906                 goto out;
907
908         /* If the calltime was zeroed, ignore it */
909         if (!trace->calltime)
910                 goto out;
911
912         calltime = trace->rettime - trace->calltime;
913
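        /*
         * Without graph time accounting, charge each function only its
         * own time: pass this call's time up to the parent's subtime,
         * and subtract the children's accumulated subtime from ours.
         */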
914         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
915                 int index;
916
917                 index = trace->depth;
918
919                 /* Append this call time to the parent time to subtract */
920                 if (index)
921                         current->ret_stack[index - 1].subtime += calltime;
922
923                 if (current->ret_stack[index].subtime < calltime)
924                         calltime -= current->ret_stack[index].subtime;
925                 else
926                         calltime = 0;
927         }
928
929         rec = ftrace_find_profiled_func(stat, trace->func);
930         if (rec) {
931                 rec->time += calltime;
932                 rec->time_squared += calltime * calltime;
933         }
934
935  out:
936         local_irq_restore(flags);
937 }
938
939 static int register_ftrace_profiler(void)
940 {
941         return register_ftrace_graph(&profile_graph_return,
942                                      &profile_graph_entry);
943 }
944
945 static void unregister_ftrace_profiler(void)
946 {
947         unregister_ftrace_graph();
948 }
949 #else
950 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
951         .func           = function_profile_call,
952         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
953         INIT_REGEX_LOCK(ftrace_profile_ops)
954 };
955
956 static int register_ftrace_profiler(void)
957 {
958         return register_ftrace_function(&ftrace_profile_ops);
959 }
960
961 static void unregister_ftrace_profiler(void)
962 {
963         unregister_ftrace_function(&ftrace_profile_ops);
964 }
965 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
966
967 static ssize_t
968 ftrace_profile_write(struct file *filp, const char __user *ubuf,
969                      size_t cnt, loff_t *ppos)
970 {
971         unsigned long val;
972         int ret;
973
974         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
975         if (ret)
976                 return ret;
977
978         val = !!val;
979
980         mutex_lock(&ftrace_profile_lock);
981         if (ftrace_profile_enabled ^ val) {
982                 if (val) {
983                         ret = ftrace_profile_init();
984                         if (ret < 0) {
985                                 cnt = ret;
986                                 goto out;
987                         }
988
989                         ret = register_ftrace_profiler();
990                         if (ret < 0) {
991                                 cnt = ret;
992                                 goto out;
993                         }
994                         ftrace_profile_enabled = 1;
995                 } else {
996                         ftrace_profile_enabled = 0;
997                         /*
998                          * unregister_ftrace_profiler() calls stop_machine()
999                          * so this acts like a synchronize_sched().
1000                          */
1001                         unregister_ftrace_profiler();
1002                 }
1003         }
1004  out:
1005         mutex_unlock(&ftrace_profile_lock);
1006
1007         *ppos += cnt;
1008
1009         return cnt;
1010 }
1011
1012 static ssize_t
1013 ftrace_profile_read(struct file *filp, char __user *ubuf,
1014                      size_t cnt, loff_t *ppos)
1015 {
1016         char buf[64];           /* big enough to hold a number */
1017         int r;
1018
1019         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1020         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1021 }
1022
1023 static const struct file_operations ftrace_profile_fops = {
1024         .open           = tracing_open_generic,
1025         .read           = ftrace_profile_read,
1026         .write          = ftrace_profile_write,
1027         .llseek         = default_llseek,
1028 };
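/*
 * From userspace the profiler is driven through debugfs, typically:
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * where function0, function1, ... are the per-cpu stat files
 * registered by ftrace_profile_debugfs() below.
 */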
1029
1030 /* used to initialize the real stat files */
1031 static struct tracer_stat function_stats __initdata = {
1032         .name           = "functions",
1033         .stat_start     = function_stat_start,
1034         .stat_next      = function_stat_next,
1035         .stat_cmp       = function_stat_cmp,
1036         .stat_headers   = function_stat_headers,
1037         .stat_show      = function_stat_show
1038 };
1039
1040 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1041 {
1042         struct ftrace_profile_stat *stat;
1043         struct dentry *entry;
1044         char *name;
1045         int ret;
1046         int cpu;
1047
1048         for_each_possible_cpu(cpu) {
1049                 stat = &per_cpu(ftrace_profile_stats, cpu);
1050
1051                 /* allocate enough for function name + cpu number */
1052                 name = kmalloc(32, GFP_KERNEL);
1053                 if (!name) {
1054                         /*
1055                          * The files created are permanent; even if something
1056                          * goes wrong later we do not free this memory.
1057                          */
1058                         WARN(1,
1059                              "Could not allocate stat file for cpu %d\n",
1060                              cpu);
1061                         return;
1062                 }
1063                 stat->stat = function_stats;
1064                 snprintf(name, 32, "function%d", cpu);
1065                 stat->stat.name = name;
1066                 ret = register_stat_tracer(&stat->stat);
1067                 if (ret) {
1068                         WARN(1,
1069                              "Could not register function stat for cpu %d\n",
1070                              cpu);
1071                         kfree(name);
1072                         return;
1073                 }
1074         }
1075
1076         entry = debugfs_create_file("function_profile_enabled", 0644,
1077                                     d_tracer, NULL, &ftrace_profile_fops);
1078         if (!entry)
1079                 pr_warning("Could not create debugfs "
1080                            "'function_profile_enabled' entry\n");
1081 }
1082
1083 #else /* CONFIG_FUNCTION_PROFILER */
1084 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1085 {
1086 }
1087 #endif /* CONFIG_FUNCTION_PROFILER */
1088
1089 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1090
1091 loff_t
1092 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1093 {
1094         loff_t ret;
1095
1096         if (file->f_mode & FMODE_READ)
1097                 ret = seq_lseek(file, offset, whence);
1098         else
1099                 file->f_pos = ret = 1;
1100
1101         return ret;
1102 }
1103
1104 #ifdef CONFIG_DYNAMIC_FTRACE
1105
1106 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1107 # error Dynamic ftrace depends on MCOUNT_RECORD
1108 #endif
1109
1110 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1111
1112 struct ftrace_func_probe {
1113         struct hlist_node       node;
1114         struct ftrace_probe_ops *ops;
1115         unsigned long           flags;
1116         unsigned long           ip;
1117         void                    *data;
1118         struct list_head        free_list;
1119 };
1120
1121 struct ftrace_func_entry {
1122         struct hlist_node hlist;
1123         unsigned long ip;
1124 };
1125
1126 struct ftrace_hash {
1127         unsigned long           size_bits;
1128         struct hlist_head       *buckets;
1129         unsigned long           count;
1130         struct rcu_head         rcu;
1131 };
1132
1133 /*
1134  * We make these constant because no one should touch them,
1135  * but they are used as the default "empty hash", to avoid allocating
1136  * it all the time. These are in a read only section such that if
1137  * anyone does try to modify it, it will cause an exception.
1138  */
1139 static const struct hlist_head empty_buckets[1];
1140 static const struct ftrace_hash empty_hash = {
1141         .buckets = (struct hlist_head *)empty_buckets,
1142 };
1143 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1144
1145 static struct ftrace_ops global_ops = {
1146         .func                   = ftrace_stub,
1147         .notrace_hash           = EMPTY_HASH,
1148         .filter_hash            = EMPTY_HASH,
1149         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1150         INIT_REGEX_LOCK(global_ops)
1151 };
1152
1153 struct ftrace_page {
1154         struct ftrace_page      *next;
1155         struct dyn_ftrace       *records;
1156         int                     index;
1157         int                     size;
1158 };
1159
1160 static struct ftrace_page *ftrace_new_pgs;
1161
1162 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1163 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
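/*
 * E.g. with 4K pages and a 16-byte dyn_ftrace (ip + flags on 64-bit,
 * no arch-specific fields), ENTRIES_PER_PAGE is 256.
 */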
1164
1165 /* estimate from running different kernels */
1166 #define NR_TO_INIT              10000
1167
1168 static struct ftrace_page       *ftrace_pages_start;
1169 static struct ftrace_page       *ftrace_pages;
1170
1171 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1172 {
1173         return !hash || !hash->count;
1174 }
1175
1176 static struct ftrace_func_entry *
1177 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1178 {
1179         unsigned long key;
1180         struct ftrace_func_entry *entry;
1181         struct hlist_head *hhd;
1182
1183         if (ftrace_hash_empty(hash))
1184                 return NULL;
1185
1186         if (hash->size_bits > 0)
1187                 key = hash_long(ip, hash->size_bits);
1188         else
1189                 key = 0;
1190
1191         hhd = &hash->buckets[key];
1192
1193         hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1194                 if (entry->ip == ip)
1195                         return entry;
1196         }
1197         return NULL;
1198 }
1199
1200 static void __add_hash_entry(struct ftrace_hash *hash,
1201                              struct ftrace_func_entry *entry)
1202 {
1203         struct hlist_head *hhd;
1204         unsigned long key;
1205
1206         if (hash->size_bits)
1207                 key = hash_long(entry->ip, hash->size_bits);
1208         else
1209                 key = 0;
1210
1211         hhd = &hash->buckets[key];
1212         hlist_add_head(&entry->hlist, hhd);
1213         hash->count++;
1214 }
1215
1216 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1217 {
1218         struct ftrace_func_entry *entry;
1219
1220         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1221         if (!entry)
1222                 return -ENOMEM;
1223
1224         entry->ip = ip;
1225         __add_hash_entry(hash, entry);
1226
1227         return 0;
1228 }
1229
1230 static void
1231 free_hash_entry(struct ftrace_hash *hash,
1232                   struct ftrace_func_entry *entry)
1233 {
1234         hlist_del(&entry->hlist);
1235         kfree(entry);
1236         hash->count--;
1237 }
1238
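/*
 * Unlike free_hash_entry(), this leaves the entry allocated so the
 * caller can re-link it into another hash (see ftrace_hash_move()).
 */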
1239 static void
1240 remove_hash_entry(struct ftrace_hash *hash,
1241                   struct ftrace_func_entry *entry)
1242 {
1243         hlist_del(&entry->hlist);
1244         hash->count--;
1245 }
1246
1247 static void ftrace_hash_clear(struct ftrace_hash *hash)
1248 {
1249         struct hlist_head *hhd;
1250         struct hlist_node *tn;
1251         struct ftrace_func_entry *entry;
1252         int size = 1 << hash->size_bits;
1253         int i;
1254
1255         if (!hash->count)
1256                 return;
1257
1258         for (i = 0; i < size; i++) {
1259                 hhd = &hash->buckets[i];
1260                 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1261                         free_hash_entry(hash, entry);
1262         }
1263         FTRACE_WARN_ON(hash->count);
1264 }
1265
1266 static void free_ftrace_hash(struct ftrace_hash *hash)
1267 {
1268         if (!hash || hash == EMPTY_HASH)
1269                 return;
1270         ftrace_hash_clear(hash);
1271         kfree(hash->buckets);
1272         kfree(hash);
1273 }
1274
1275 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1276 {
1277         struct ftrace_hash *hash;
1278
1279         hash = container_of(rcu, struct ftrace_hash, rcu);
1280         free_ftrace_hash(hash);
1281 }
1282
1283 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1284 {
1285         if (!hash || hash == EMPTY_HASH)
1286                 return;
1287         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1288 }
1289
1290 void ftrace_free_filter(struct ftrace_ops *ops)
1291 {
1292         ftrace_ops_init(ops);
1293         free_ftrace_hash(ops->filter_hash);
1294         free_ftrace_hash(ops->notrace_hash);
1295 }
1296
1297 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1298 {
1299         struct ftrace_hash *hash;
1300         int size;
1301
1302         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1303         if (!hash)
1304                 return NULL;
1305
1306         size = 1 << size_bits;
1307         hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1308
1309         if (!hash->buckets) {
1310                 kfree(hash);
1311                 return NULL;
1312         }
1313
1314         hash->size_bits = size_bits;
1315
1316         return hash;
1317 }
1318
1319 static struct ftrace_hash *
1320 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1321 {
1322         struct ftrace_func_entry *entry;
1323         struct ftrace_hash *new_hash;
1324         int size;
1325         int ret;
1326         int i;
1327
1328         new_hash = alloc_ftrace_hash(size_bits);
1329         if (!new_hash)
1330                 return NULL;
1331
1332         /* Empty hash? */
1333         if (ftrace_hash_empty(hash))
1334                 return new_hash;
1335
1336         size = 1 << hash->size_bits;
1337         for (i = 0; i < size; i++) {
1338                 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1339                         ret = add_hash_entry(new_hash, entry->ip);
1340                         if (ret < 0)
1341                                 goto free_hash;
1342                 }
1343         }
1344
1345         FTRACE_WARN_ON(new_hash->count != hash->count);
1346
1347         return new_hash;
1348
1349  free_hash:
1350         free_ftrace_hash(new_hash);
1351         return NULL;
1352 }
1353
1354 static void
1355 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1356 static void
1357 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1358
1359 static int
1360 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1361                  struct ftrace_hash **dst, struct ftrace_hash *src)
1362 {
1363         struct ftrace_func_entry *entry;
1364         struct hlist_node *tn;
1365         struct hlist_head *hhd;
1366         struct ftrace_hash *old_hash;
1367         struct ftrace_hash *new_hash;
1368         int size = src->count;
1369         int bits = 0;
1370         int ret;
1371         int i;
1372
1373         /*
1374          * Remove the current set, update the hash and add
1375          * them back.
1376          */
1377         ftrace_hash_rec_disable(ops, enable);
1378
1379         /*
1380          * If the new source is empty, just free dst and assign it
1381          * the empty_hash.
1382          */
1383         if (!src->count) {
1384                 free_ftrace_hash_rcu(*dst);
1385                 rcu_assign_pointer(*dst, EMPTY_HASH);
1386                 /* still need to update the function records */
1387                 ret = 0;
1388                 goto out;
1389         }
1390
1391         /*
1392          * Make the hash size about 1/2 the # found
1393          */
1394         for (size /= 2; size; size >>= 1)
1395                 bits++;
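        /* e.g. 1000 entries: size starts at 500, so bits ends up 9 (512 buckets) */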
1396
1397         /* Don't allocate too much */
1398         if (bits > FTRACE_HASH_MAX_BITS)
1399                 bits = FTRACE_HASH_MAX_BITS;
1400
1401         ret = -ENOMEM;
1402         new_hash = alloc_ftrace_hash(bits);
1403         if (!new_hash)
1404                 goto out;
1405
1406         size = 1 << src->size_bits;
1407         for (i = 0; i < size; i++) {
1408                 hhd = &src->buckets[i];
1409                 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1410                         remove_hash_entry(src, entry);
1411                         __add_hash_entry(new_hash, entry);
1412                 }
1413         }
1414
1415         old_hash = *dst;
1416         rcu_assign_pointer(*dst, new_hash);
1417         free_ftrace_hash_rcu(old_hash);
1418
1419         ret = 0;
1420  out:
1421         /*
1422          * Enable regardless of ret:
1423          *  On success, we enable the new hash.
1424          *  On failure, we re-enable the original hash.
1425          */
1426         ftrace_hash_rec_enable(ops, enable);
1427
1428         return ret;
1429 }
1430
1431 /*
1432  * Test the hashes for this ops to see if we want to call
1433  * the ops->func or not.
1434  *
1435  * It's a match if the ip is in the ops->filter_hash or
1436  * the filter_hash does not exist or is empty,
1437  *  AND
1438  * the ip is not in the ops->notrace_hash.
1439  *
1440  * This needs to be called with preemption disabled as
1441  * the hashes are freed with call_rcu_sched().
1442  */
1443 static int
1444 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1445 {
1446         struct ftrace_hash *filter_hash;
1447         struct ftrace_hash *notrace_hash;
1448         int ret;
1449
1450 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1451         /*
1452          * There's a small race when adding ops: an ftrace handler that
1453          * wants regs may be called without them. We cannot allow that
1454          * handler to be called if regs is NULL.
1455          */
1456         if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1457                 return 0;
1458 #endif
1459
1460         filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1461         notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1462
1463         if ((ftrace_hash_empty(filter_hash) ||
1464              ftrace_lookup_ip(filter_hash, ip)) &&
1465             (ftrace_hash_empty(notrace_hash) ||
1466              !ftrace_lookup_ip(notrace_hash, ip)))
1467                 ret = 1;
1468         else
1469                 ret = 0;
1470
1471         return ret;
1472 }
1473
1474 /*
1475  * This is a double for. Do not use 'break' to break out of the loop,
1476  * you must use a goto.
1477  */
1478 #define do_for_each_ftrace_rec(pg, rec)                                 \
1479         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1480                 int _____i;                                             \
1481                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1482                         rec = &pg->records[_____i];
1483
1484 #define while_for_each_ftrace_rec()             \
1485                 }                               \
1486         }
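/*
 * Typical use (see __ftrace_hash_rec_update() below):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (!interesting(rec))
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 *
 * where interesting() stands in for whatever per-record test the
 * caller applies; continue is fine, break is not.
 */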
1487
1488
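/*
 * The bsearch key (see ftrace_location_range()) overloads dyn_ftrace:
 * key->ip is the start of the range and key->flags its end, so a
 * record compares equal when it lies anywhere within [ip, flags].
 */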
1489 static int ftrace_cmp_recs(const void *a, const void *b)
1490 {
1491         const struct dyn_ftrace *key = a;
1492         const struct dyn_ftrace *rec = b;
1493
1494         if (key->flags < rec->ip)
1495                 return -1;
1496         if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1497                 return 1;
1498         return 0;
1499 }
1500
1501 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1502 {
1503         struct ftrace_page *pg;
1504         struct dyn_ftrace *rec;
1505         struct dyn_ftrace key;
1506
1507         key.ip = start;
1508         key.flags = end;        /* overload flags, as it is unsigned long */
1509
1510         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1511                 if (end < pg->records[0].ip ||
1512                     start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1513                         continue;
1514                 rec = bsearch(&key, pg->records, pg->index,
1515                               sizeof(struct dyn_ftrace),
1516                               ftrace_cmp_recs);
1517                 if (rec)
1518                         return rec->ip;
1519         }
1520
1521         return 0;
1522 }
1523
1524 /**
1525  * ftrace_location - return rec->ip if the ip given is a traced location
1526  * @ip: the instruction pointer to check
1527  *
1528  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1529  * That is, the instruction that is either a NOP or call to
1530  * the function tracer. It checks the ftrace internal tables to
1531  * determine if the address belongs or not.
1532  */
1533 unsigned long ftrace_location(unsigned long ip)
1534 {
1535         return ftrace_location_range(ip, ip);
1536 }
1537
1538 /**
1539  * ftrace_text_reserved - return true if range contains an ftrace location
1540  * @start: start of range to search
1541  * @end: end of range to search (inclusive). @end points to the last byte to check.
1542  *
1543  * Returns 1 if the range from @start to @end contains a ftrace location.
1544  * That is, the instruction that is either a NOP or call to
1545  * the function tracer. It checks the ftrace internal tables to
1546  * determine if the address belongs or not.
1547  */
1548 int ftrace_text_reserved(void *start, void *end)
1549 {
1550         unsigned long ret;
1551
1552         ret = ftrace_location_range((unsigned long)start,
1553                                     (unsigned long)end);
1554
1555         return (int)!!ret;
1556 }
1557
1558 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1559                                      int filter_hash,
1560                                      bool inc)
1561 {
1562         struct ftrace_hash *hash;
1563         struct ftrace_hash *other_hash;
1564         struct ftrace_page *pg;
1565         struct dyn_ftrace *rec;
1566         int count = 0;
1567         int all = 0;
1568
1569         /* Only update if the ops has been registered */
1570         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1571                 return;
1572
1573         /*
1574          * In the filter_hash case:
1575          *   If the count is zero, we update all records.
1576          *   Otherwise we just update the items in the hash.
1577          *
1578          * In the notrace_hash case:
1579          *   We enable the update in the hash.
1580          *   As disabling notrace means enabling the tracing,
1581          *   and enabling notrace means disabling, the inc variable
1582          * gets inverted.
1583          */
1584         if (filter_hash) {
1585                 hash = ops->filter_hash;
1586                 other_hash = ops->notrace_hash;
1587                 if (ftrace_hash_empty(hash))
1588                         all = 1;
1589         } else {
1590                 inc = !inc;
1591                 hash = ops->notrace_hash;
1592                 other_hash = ops->filter_hash;
1593                 /*
1594                  * If the notrace hash has no items,
1595                  * then there's nothing to do.
1596                  */
1597                 if (ftrace_hash_empty(hash))
1598                         return;
1599         }
1600
1601         do_for_each_ftrace_rec(pg, rec) {
1602                 int in_other_hash = 0;
1603                 int in_hash = 0;
1604                 int match = 0;
1605
1606                 if (all) {
1607                         /*
1608                          * Only the filter_hash affects all records.
1609                          * Update if the record is not in the notrace hash.
1610                          */
1611                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1612                                 match = 1;
1613                 } else {
1614                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1615                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1616
1617                         /*
1618                          * The record is a match only if this hash selects
1619                          * it and the other hash does not override it. */
1620                         if (filter_hash && in_hash && !in_other_hash)
1621                                 match = 1;
1622                         else if (!filter_hash && in_hash &&
1623                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1624                                 match = 1;
1625                 }
1626                 if (!match)
1627                         continue;
1628
1629                 if (inc) {
1630                         rec->flags++;
1631                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1632                                 return;
1633                         /*
1634                          * If any ops wants regs saved for this function
1635                          * then all ops will get saved regs.
1636                          */
1637                         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1638                                 rec->flags |= FTRACE_FL_REGS;
1639                 } else {
1640                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1641                                 return;
1642                         rec->flags--;
1643                 }
1644                 count++;
1645                 /* Shortcut, if we handled all records, we are done. */
1646                 if (!all && count == hash->count)
1647                         return;
1648         } while_for_each_ftrace_rec();
1649 }
1650
1651 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1652                                     int filter_hash)
1653 {
1654         __ftrace_hash_rec_update(ops, filter_hash, 0);
1655 }
1656
1657 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1658                                    int filter_hash)
1659 {
1660         __ftrace_hash_rec_update(ops, filter_hash, 1);
1661 }
1662
1663 static void print_ip_ins(const char *fmt, unsigned char *p)
1664 {
1665         int i;
1666
1667         printk(KERN_CONT "%s", fmt);
1668
1669         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1670                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1671 }
1672
1673 /**
1674  * ftrace_bug - report and shutdown function tracer
1675  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1676  * @ip: The address that failed
1677  *
1678  * The arch code that enables or disables function tracing
1679  * can call ftrace_bug() when it has detected a problem in
1680  * modifying the code. @failed should be one of:
1681  * EFAULT - if the problem happens on reading the @ip address
1682  * EINVAL - if what is read at @ip is not what was expected
1683  * EPERM - if the problem happens on writing to the @ip address
1684  */
1685 void ftrace_bug(int failed, unsigned long ip)
1686 {
1687         switch (failed) {
1688         case -EFAULT:
1689                 FTRACE_WARN_ON_ONCE(1);
1690                 pr_info("ftrace faulted on modifying ");
1691                 print_ip_sym(ip);
1692                 break;
1693         case -EINVAL:
1694                 FTRACE_WARN_ON_ONCE(1);
1695                 pr_info("ftrace failed to modify ");
1696                 print_ip_sym(ip);
1697                 print_ip_ins(" actual: ", (unsigned char *)ip);
1698                 printk(KERN_CONT "\n");
1699                 break;
1700         case -EPERM:
1701                 FTRACE_WARN_ON_ONCE(1);
1702                 pr_info("ftrace faulted on writing ");
1703                 print_ip_sym(ip);
1704                 break;
1705         default:
1706                 FTRACE_WARN_ON_ONCE(1);
1707                 pr_info("ftrace faulted on unknown error ");
1708                 print_ip_sym(ip);
1709         }
1710 }
1711
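/*
 * Illustrative sketch of the reporting pattern (it mirrors callers in
 * this file such as ftrace_code_disable() below): the arch helper
 * returns -EFAULT, -EINVAL or -EPERM, and ftrace_bug() prints the
 * diagnosis and kills ftrace:
 *
 *      ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 *      if (ret)
 *              ftrace_bug(ret, rec->ip);
 */
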
1712 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1713 {
1714         unsigned long flag = 0UL;
1715
1716         /*
1717          * If we are updating calls:
1718          *
1719          *   If the record has a ref count, then we need to enable it
1720          *   because someone is using it.
1721          *
1722          *   Otherwise we make sure it's disabled.
1723          *
1724          * If we are disabling calls, then disable all records that
1725          * are enabled.
1726          */
1727         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1728                 flag = FTRACE_FL_ENABLED;
1729
1730         /*
1731          * If enabling and the REGS flag does not match the REGS_EN, then
1732          * do not ignore this record. Set flags to fail the compare against
1733          * ENABLED.
1734          */
1735         if (flag &&
1736             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1737                 flag |= FTRACE_FL_REGS;
1738
1739         /* If the state of this record hasn't changed, then do nothing */
1740         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1741                 return FTRACE_UPDATE_IGNORE;
1742
1743         if (flag) {
1744                 /* Save off if rec is being enabled (for return value) */
1745                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1746
1747                 if (update) {
1748                         rec->flags |= FTRACE_FL_ENABLED;
1749                         if (flag & FTRACE_FL_REGS) {
1750                                 if (rec->flags & FTRACE_FL_REGS)
1751                                         rec->flags |= FTRACE_FL_REGS_EN;
1752                                 else
1753                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1754                         }
1755                 }
1756
1757                 /*
1758                  * If this record is being updated from a nop, then
1759                  *   return UPDATE_MAKE_CALL.
1760                  * Otherwise, if the EN flag is set, then return
1761                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1762          *   from the non-save-regs to a save-regs function.
1763          * Otherwise,
1764          *   return UPDATE_MODIFY_CALL to tell the caller to convert
1765          *   from the save-regs to a non-save-regs function.
1766                  */
1767                 if (flag & FTRACE_FL_ENABLED)
1768                         return FTRACE_UPDATE_MAKE_CALL;
1769                 else if (rec->flags & FTRACE_FL_REGS_EN)
1770                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1771                 else
1772                         return FTRACE_UPDATE_MODIFY_CALL;
1773         }
1774
1775         if (update) {
1776                 /* If there's no more users, clear all flags */
1777                 if (!(rec->flags & ~FTRACE_FL_MASK))
1778                         rec->flags = 0;
1779                 else
1780                         /* Just disable the record (keep REGS state) */
1781                         rec->flags &= ~FTRACE_FL_ENABLED;
1782         }
1783
1784         return FTRACE_UPDATE_MAKE_NOP;
1785 }
1786
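/*
 * Worked example (illustrative): a record with a non-zero ref count
 * that is still a nop yields flag == FTRACE_FL_ENABLED, so the check
 * above returns FTRACE_UPDATE_MAKE_CALL. A record that is already
 * enabled but whose REGS bit no longer matches REGS_EN keeps only
 * FTRACE_FL_REGS in flag after the XOR, and one of the MODIFY variants
 * is returned instead.
 */
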
1787 /**
1788  * ftrace_update_record, set a record that is now being traced or not
1789  * @rec: the record to update
1790  * @enable: set to 1 if the record is tracing, zero to force disable
1791  *
1792  * The records that represent all functions that can be traced need
1793  * to be updated when tracing has been enabled.
1794  */
1795 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1796 {
1797         return ftrace_check_record(rec, enable, 1);
1798 }
1799
1800 /**
1801  * ftrace_test_record, check if the record has been enabled or not
1802  * @rec: the record to test
1803  * @enable: set to 1 to check if enabled, 0 if it is disabled
1804  *
1805  * The arch code may need to test if a record is already set to
1806  * tracing to determine how to modify the function code that it
1807  * represents.
1808  */
1809 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1810 {
1811         return ftrace_check_record(rec, enable, 0);
1812 }
1813
1814 static int
1815 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1816 {
1817         unsigned long ftrace_old_addr;
1818         unsigned long ftrace_addr;
1819         int ret;
1820
1821         ret = ftrace_update_record(rec, enable);
1822
1823         if (rec->flags & FTRACE_FL_REGS)
1824                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1825         else
1826                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1827
1828         switch (ret) {
1829         case FTRACE_UPDATE_IGNORE:
1830                 return 0;
1831
1832         case FTRACE_UPDATE_MAKE_CALL:
1833                 return ftrace_make_call(rec, ftrace_addr);
1834
1835         case FTRACE_UPDATE_MAKE_NOP:
1836                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1837
1838         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1839         case FTRACE_UPDATE_MODIFY_CALL:
1840                 if (rec->flags & FTRACE_FL_REGS)
1841                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1842                 else
1843                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1844
1845                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1846         }
1847
1848         return -1; /* unknown ftrace bug */
1849 }
1850
1851 void __weak ftrace_replace_code(int enable)
1852 {
1853         struct dyn_ftrace *rec;
1854         struct ftrace_page *pg;
1855         int failed;
1856
1857         if (unlikely(ftrace_disabled))
1858                 return;
1859
1860         do_for_each_ftrace_rec(pg, rec) {
1861                 failed = __ftrace_replace_code(rec, enable);
1862                 if (failed) {
1863                         ftrace_bug(failed, rec->ip);
1864                         /* Stop processing */
1865                         return;
1866                 }
1867         } while_for_each_ftrace_rec();
1868 }
1869
1870 struct ftrace_rec_iter {
1871         struct ftrace_page      *pg;
1872         int                     index;
1873 };
1874
1875 /**
1876  * ftrace_rec_iter_start, start up iterating over traced functions
1877  *
1878  * Returns an iterator handle that is used to iterate over all
1879  * the records that represent address locations where functions
1880  * are traced.
1881  *
1882  * May return NULL if no records are available.
1883  */
1884 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1885 {
1886         /*
1887          * We only use a single iterator.
1888          * Protected by the ftrace_lock mutex.
1889          */
1890         static struct ftrace_rec_iter ftrace_rec_iter;
1891         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1892
1893         iter->pg = ftrace_pages_start;
1894         iter->index = 0;
1895
1896         /* Could have empty pages */
1897         while (iter->pg && !iter->pg->index)
1898                 iter->pg = iter->pg->next;
1899
1900         if (!iter->pg)
1901                 return NULL;
1902
1903         return iter;
1904 }
1905
1906 /**
1907  * ftrace_rec_iter_next, get the next record to process.
1908  * @iter: The handle to the iterator.
1909  *
1910  * Returns the next iterator after the given iterator @iter.
1911  */
1912 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1913 {
1914         iter->index++;
1915
1916         if (iter->index >= iter->pg->index) {
1917                 iter->pg = iter->pg->next;
1918                 iter->index = 0;
1919
1920                 /* Could have empty pages */
1921                 while (iter->pg && !iter->pg->index)
1922                         iter->pg = iter->pg->next;
1923         }
1924
1925         if (!iter->pg)
1926                 return NULL;
1927
1928         return iter;
1929 }
1930
1931 /**
1932  * ftrace_rec_iter_record, get the record at the iterator location
1933  * @iter: The current iterator location
1934  *
1935  * Returns the record that the current @iter is at.
1936  */
1937 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1938 {
1939         return &iter->pg->records[iter->index];
1940 }
1941
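/*
 * Usage sketch (hypothetical arch-side caller; assumes ftrace_lock is
 * held, as required by the single static iterator above):
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for (iter = ftrace_rec_iter_start(); iter;
 *           iter = ftrace_rec_iter_next(iter)) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ... patch the call site at rec->ip ...
 *      }
 */
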
1942 static int
1943 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1944 {
1945         unsigned long ip;
1946         int ret;
1947
1948         ip = rec->ip;
1949
1950         if (unlikely(ftrace_disabled))
1951                 return 0;
1952
1953         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1954         if (ret) {
1955                 ftrace_bug(ret, ip);
1956                 return 0;
1957         }
1958         return 1;
1959 }
1960
1961 /*
1962  * archs can override this function if they must do something
1963  * before the code modification is performed.
1964  */
1965 int __weak ftrace_arch_code_modify_prepare(void)
1966 {
1967         return 0;
1968 }
1969
1970 /*
1971  * archs can override this function if they must do something
1972  * after the modifying code is performed.
1973  * after the code modification is performed.
1974 int __weak ftrace_arch_code_modify_post_process(void)
1975 {
1976         return 0;
1977 }
1978
1979 void ftrace_modify_all_code(int command)
1980 {
1981         if (command & FTRACE_UPDATE_CALLS)
1982                 ftrace_replace_code(1);
1983         else if (command & FTRACE_DISABLE_CALLS)
1984                 ftrace_replace_code(0);
1985
1986         if (command & FTRACE_UPDATE_TRACE_FUNC)
1987                 ftrace_update_ftrace_func(ftrace_trace_function);
1988
1989         if (command & FTRACE_START_FUNC_RET)
1990                 ftrace_enable_ftrace_graph_caller();
1991         else if (command & FTRACE_STOP_FUNC_RET)
1992                 ftrace_disable_ftrace_graph_caller();
1993 }
1994
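/*
 * Illustrative call (not taken from this file): the command is a
 * bitmask, so a caller that wants to update the call sites and switch
 * the trace callback in one pass would use:
 *
 *      ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC);
 */
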
1995 static int __ftrace_modify_code(void *data)
1996 {
1997         int *command = data;
1998
1999         ftrace_modify_all_code(*command);
2000
2001         return 0;
2002 }
2003
2004 /**
2005  * ftrace_run_stop_machine, go back to the stop machine method
2006  * @command: The command to tell ftrace what to do
2007  *
2008  * If an arch needs to fall back to the stop machine method,
2009  * it can call this function.
2010  */
2011 void ftrace_run_stop_machine(int command)
2012 {
2013         stop_machine(__ftrace_modify_code, &command, NULL);
2014 }
2015
2016 /**
2017  * arch_ftrace_update_code, modify the code to trace or not trace
2018  * @command: The command that needs to be done
2019  *
2020  * Archs can override this function if they do not need to
2021  * run stop_machine() to modify code.
2022  */
2023 void __weak arch_ftrace_update_code(int command)
2024 {
2025         ftrace_run_stop_machine(command);
2026 }
2027
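/*
 * Sketch of an override (hypothetical): an arch that can patch its own
 * text safely while the kernel runs, e.g. via breakpoints, may skip
 * stop_machine() entirely:
 *
 *      void arch_ftrace_update_code(int command)
 *      {
 *              ... make text writable or arm breakpoints ...
 *              ftrace_modify_all_code(command);
 *              ... restore protections ...
 *      }
 */
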
2028 static void ftrace_run_update_code(int command)
2029 {
2030         int ret;
2031
2032         ret = ftrace_arch_code_modify_prepare();
2033         FTRACE_WARN_ON(ret);
2034         if (ret)
2035                 return;
2036         /*
2037          * Do not call function tracer while we update the code.
2038          * We are in stop machine.
2039          */
2040         function_trace_stop++;
2041
2042         /*
2043          * By default we use stop_machine() to modify the code.
2044          * But archs can do whatever they want as long as it
2045          * is safe. The stop_machine() is the safest, but also
2046          * produces the most overhead.
2047          */
2048         arch_ftrace_update_code(command);
2049
2050         function_trace_stop--;
2051
2052         ret = ftrace_arch_code_modify_post_process();
2053         FTRACE_WARN_ON(ret);
2054 }
2055
2056 static ftrace_func_t saved_ftrace_func;
2057 static int ftrace_start_up;
2058 static int global_start_up;
2059
2060 static void ftrace_startup_enable(int command)
2061 {
2062         if (saved_ftrace_func != ftrace_trace_function) {
2063                 saved_ftrace_func = ftrace_trace_function;
2064                 command |= FTRACE_UPDATE_TRACE_FUNC;
2065         }
2066
2067         if (!command || !ftrace_enabled)
2068                 return;
2069
2070         ftrace_run_update_code(command);
2071 }
2072
2073 static int ftrace_startup(struct ftrace_ops *ops, int command)
2074 {
2075         bool hash_enable = true;
2076
2077         if (unlikely(ftrace_disabled))
2078                 return -ENODEV;
2079
2080         ftrace_start_up++;
2081         command |= FTRACE_UPDATE_CALLS;
2082
2083         /* ops marked global share the filter hashes */
2084         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2085                 ops = &global_ops;
2086                 /* Don't update hash if global is already set */
2087                 if (global_start_up)
2088                         hash_enable = false;
2089                 global_start_up++;
2090         }
2091
2092         ops->flags |= FTRACE_OPS_FL_ENABLED;
2093         if (hash_enable)
2094                 ftrace_hash_rec_enable(ops, 1);
2095
2096         ftrace_startup_enable(command);
2097
2098         return 0;
2099 }
2100
2101 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2102 {
2103         bool hash_disable = true;
2104
2105         if (unlikely(ftrace_disabled))
2106                 return;
2107
2108         ftrace_start_up--;
2109         /*
2110          * Just warn in case of an imbalance; no need to kill ftrace, it's not
2111          * critical, but the ftrace_call callers may never be nopped again after
2112          * further ftrace uses.
2113          */
2114         WARN_ON_ONCE(ftrace_start_up < 0);
2115
2116         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2117                 ops = &global_ops;
2118                 global_start_up--;
2119                 WARN_ON_ONCE(global_start_up < 0);
2120                 /* Don't update hash if global still has users */
2121                 if (global_start_up) {
2122                         WARN_ON_ONCE(!ftrace_start_up);
2123                         hash_disable = false;
2124                 }
2125         }
2126
2127         if (hash_disable)
2128                 ftrace_hash_rec_disable(ops, 1);
2129
2130         if (ops != &global_ops || !global_start_up)
2131                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2132
2133         command |= FTRACE_UPDATE_CALLS;
2134
2135         if (saved_ftrace_func != ftrace_trace_function) {
2136                 saved_ftrace_func = ftrace_trace_function;
2137                 command |= FTRACE_UPDATE_TRACE_FUNC;
2138         }
2139
2140         if (!command || !ftrace_enabled)
2141                 return;
2142
2143         ftrace_run_update_code(command);
2144 }
2145
2146 static void ftrace_startup_sysctl(void)
2147 {
2148         if (unlikely(ftrace_disabled))
2149                 return;
2150
2151         /* Force update next time */
2152         saved_ftrace_func = NULL;
2153         /* ftrace_start_up is true if we want ftrace running */
2154         if (ftrace_start_up)
2155                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2156 }
2157
2158 static void ftrace_shutdown_sysctl(void)
2159 {
2160         if (unlikely(ftrace_disabled))
2161                 return;
2162
2163         /* ftrace_start_up is true if ftrace is running */
2164         if (ftrace_start_up)
2165                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2166 }
2167
2168 static cycle_t          ftrace_update_time;
2169 static unsigned long    ftrace_update_cnt;
2170 unsigned long           ftrace_update_tot_cnt;
2171
2172 static int ops_traces_mod(struct ftrace_ops *ops)
2173 {
2174         struct ftrace_hash *hash;
2175
2176         hash = ops->filter_hash;
2177         return ftrace_hash_empty(hash);
2178 }
2179
2180 static int ftrace_update_code(struct module *mod)
2181 {
2182         struct ftrace_page *pg;
2183         struct dyn_ftrace *p;
2184         cycle_t start, stop;
2185         unsigned long ref = 0;
2186         int i;
2187
2188         /*
2189          * When adding a module, we need to check if tracers are
2190          * currently enabled and if they are set to trace all functions.
2191          * If they are, we need to enable the module functions as well
2192          * as update the reference counts for those function records.
2193          */
2194         if (mod) {
2195                 struct ftrace_ops *ops;
2196
2197                 for (ops = ftrace_ops_list;
2198                      ops != &ftrace_list_end; ops = ops->next) {
2199                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2200                             ops_traces_mod(ops))
2201                                 ref++;
2202                 }
2203         }
2204
2205         start = ftrace_now(raw_smp_processor_id());
2206         ftrace_update_cnt = 0;
2207
2208         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2209
2210                 for (i = 0; i < pg->index; i++) {
2211                         /* If something went wrong, bail without enabling anything */
2212                         if (unlikely(ftrace_disabled))
2213                                 return -1;
2214
2215                         p = &pg->records[i];
2216                         p->flags = ref;
2217
2218                         /*
2219                          * Do the initial record conversion from mcount jump
2220                          * to the NOP instructions.
2221                          */
2222                         if (!ftrace_code_disable(mod, p))
2223                                 break;
2224
2225                         ftrace_update_cnt++;
2226
2227                         /*
2228                          * If the tracing is enabled, go ahead and enable the record.
2229                          *
2230                          * The reason not to enable the record immediately is the
2231                          * inherent check of ftrace_make_nop/ftrace_make_call for
2232                          * correct previous instructions.  Doing the NOP conversion
2233                          * first puts the module into the correct state, thus
2234                          * passing the ftrace_make_call check.
2235                          */
2236                         if (ftrace_start_up && ref) {
2237                                 int failed = __ftrace_replace_code(p, 1);
2238                                 if (failed)
2239                                         ftrace_bug(failed, p->ip);
2240                         }
2241                 }
2242         }
2243
2244         ftrace_new_pgs = NULL;
2245
2246         stop = ftrace_now(raw_smp_processor_id());
2247         ftrace_update_time = stop - start;
2248         ftrace_update_tot_cnt += ftrace_update_cnt;
2249
2250         return 0;
2251 }
2252
2253 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2254 {
2255         int order;
2256         int cnt;
2257
2258         if (WARN_ON(!count))
2259                 return -EINVAL;
2260
2261         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2262
2263         /*
2264          * We want to fill as much as possible; no more than one page
2265          * may be left empty.
2266          */
2267         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2268                 order--;
2269
2270  again:
2271         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2272
2273         if (!pg->records) {
2274                 /* if we can't allocate this size, try something smaller */
2275                 if (!order)
2276                         return -ENOMEM;
2277                 order >>= 1;
2278                 goto again;
2279         }
2280
2281         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2282         pg->size = cnt;
2283
2284         if (cnt > count)
2285                 cnt = count;
2286
2287         return cnt;
2288 }
2289
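/*
 * Worked example (illustrative figures; assumes 4K pages and, for the
 * sake of the arithmetic, a 32-byte struct dyn_ftrace, so
 * ENTRIES_PER_PAGE == 128): count == 1000 gives
 * DIV_ROUND_UP(1000, 128) == 8 pages and order == 3. The loop above
 * keeps order at 3 because (4096 << 3) / 32 == 1024 entries, which is
 * below 1000 + 128; cnt is then capped from 1024 down to 1000.
 */
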
2290 static struct ftrace_page *
2291 ftrace_allocate_pages(unsigned long num_to_init)
2292 {
2293         struct ftrace_page *start_pg;
2294         struct ftrace_page *pg;
2295         int order;
2296         int cnt;
2297
2298         if (!num_to_init)
2299                 return NULL;
2300
2301         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2302         if (!pg)
2303                 return NULL;
2304
2305         /*
2306          * Try to allocate as much as possible in one contiguous
2307          * location that fills in all of the space. We want to
2308          * waste as little space as possible.
2309          */
2310         for (;;) {
2311                 cnt = ftrace_allocate_records(pg, num_to_init);
2312                 if (cnt < 0)
2313                         goto free_pages;
2314
2315                 num_to_init -= cnt;
2316                 if (!num_to_init)
2317                         break;
2318
2319                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2320                 if (!pg->next)
2321                         goto free_pages;
2322
2323                 pg = pg->next;
2324         }
2325
2326         return start_pg;
2327
2328  free_pages:
2329         while (start_pg) {
2330                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2331                 free_pages((unsigned long)pg->records, order);
2332                 start_pg = pg->next;
2333                 kfree(pg);
2334                 pg = start_pg;
2335         }
2336         pr_info("ftrace: FAILED to allocate memory for functions\n");
2337         return NULL;
2338 }
2339
2340 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2341 {
2342         int cnt;
2343
2344         if (!num_to_init) {
2345                 pr_info("ftrace: No functions to be traced?\n");
2346                 return -1;
2347         }
2348
2349         cnt = num_to_init / ENTRIES_PER_PAGE;
2350         pr_info("ftrace: allocating %ld entries in %d pages\n",
2351                 num_to_init, cnt + 1);
2352
2353         return 0;
2354 }
2355
2356 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2357
2358 struct ftrace_iterator {
2359         loff_t                          pos;
2360         loff_t                          func_pos;
2361         struct ftrace_page              *pg;
2362         struct dyn_ftrace               *func;
2363         struct ftrace_func_probe        *probe;
2364         struct trace_parser             parser;
2365         struct ftrace_hash              *hash;
2366         struct ftrace_ops               *ops;
2367         int                             hidx;
2368         int                             idx;
2369         unsigned                        flags;
2370 };
2371
2372 static void *
2373 t_hash_next(struct seq_file *m, loff_t *pos)
2374 {
2375         struct ftrace_iterator *iter = m->private;
2376         struct hlist_node *hnd = NULL;
2377         struct hlist_head *hhd;
2378
2379         (*pos)++;
2380         iter->pos = *pos;
2381
2382         if (iter->probe)
2383                 hnd = &iter->probe->node;
2384  retry:
2385         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2386                 return NULL;
2387
2388         hhd = &ftrace_func_hash[iter->hidx];
2389
2390         if (hlist_empty(hhd)) {
2391                 iter->hidx++;
2392                 hnd = NULL;
2393                 goto retry;
2394         }
2395
2396         if (!hnd)
2397                 hnd = hhd->first;
2398         else {
2399                 hnd = hnd->next;
2400                 if (!hnd) {
2401                         iter->hidx++;
2402                         goto retry;
2403                 }
2404         }
2405
2406         if (WARN_ON_ONCE(!hnd))
2407                 return NULL;
2408
2409         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2410
2411         return iter;
2412 }
2413
2414 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2415 {
2416         struct ftrace_iterator *iter = m->private;
2417         void *p = NULL;
2418         loff_t l;
2419
2420         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2421                 return NULL;
2422
2423         if (iter->func_pos > *pos)
2424                 return NULL;
2425
2426         iter->hidx = 0;
2427         for (l = 0; l <= (*pos - iter->func_pos); ) {
2428                 p = t_hash_next(m, &l);
2429                 if (!p)
2430                         break;
2431         }
2432         if (!p)
2433                 return NULL;
2434
2435         /* Only set this if we have an item */
2436         iter->flags |= FTRACE_ITER_HASH;
2437
2438         return iter;
2439 }
2440
2441 static int
2442 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2443 {
2444         struct ftrace_func_probe *rec;
2445
2446         rec = iter->probe;
2447         if (WARN_ON_ONCE(!rec))
2448                 return -EIO;
2449
2450         if (rec->ops->print)
2451                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2452
2453         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2454
2455         if (rec->data)
2456                 seq_printf(m, ":%p", rec->data);
2457         seq_putc(m, '\n');
2458
2459         return 0;
2460 }
2461
2462 static void *
2463 t_next(struct seq_file *m, void *v, loff_t *pos)
2464 {
2465         struct ftrace_iterator *iter = m->private;
2466         struct ftrace_ops *ops = iter->ops;
2467         struct dyn_ftrace *rec = NULL;
2468
2469         if (unlikely(ftrace_disabled))
2470                 return NULL;
2471
2472         if (iter->flags & FTRACE_ITER_HASH)
2473                 return t_hash_next(m, pos);
2474
2475         (*pos)++;
2476         iter->pos = iter->func_pos = *pos;
2477
2478         if (iter->flags & FTRACE_ITER_PRINTALL)
2479                 return t_hash_start(m, pos);
2480
2481  retry:
2482         if (iter->idx >= iter->pg->index) {
2483                 if (iter->pg->next) {
2484                         iter->pg = iter->pg->next;
2485                         iter->idx = 0;
2486                         goto retry;
2487                 }
2488         } else {
2489                 rec = &iter->pg->records[iter->idx++];
2490                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2491                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2492
2493                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2494                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2495
2496                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2497                      !(rec->flags & FTRACE_FL_ENABLED))) {
2498
2499                         rec = NULL;
2500                         goto retry;
2501                 }
2502         }
2503
2504         if (!rec)
2505                 return t_hash_start(m, pos);
2506
2507         iter->func = rec;
2508
2509         return iter;
2510 }
2511
2512 static void reset_iter_read(struct ftrace_iterator *iter)
2513 {
2514         iter->pos = 0;
2515         iter->func_pos = 0;
2516         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2517 }
2518
2519 static void *t_start(struct seq_file *m, loff_t *pos)
2520 {
2521         struct ftrace_iterator *iter = m->private;
2522         struct ftrace_ops *ops = iter->ops;
2523         void *p = NULL;
2524         loff_t l;
2525
2526         mutex_lock(&ftrace_lock);
2527
2528         if (unlikely(ftrace_disabled))
2529                 return NULL;
2530
2531         /*
2532          * If an lseek was done, then reset and start from beginning.
2533          */
2534         if (*pos < iter->pos)
2535                 reset_iter_read(iter);
2536
2537         /*
2538          * For set_ftrace_filter reading, if we have the filter
2539          * off, we can short cut and just print out that all
2540          * functions are enabled.
2541          */
2542         if (iter->flags & FTRACE_ITER_FILTER &&
2543             ftrace_hash_empty(ops->filter_hash)) {
2544                 if (*pos > 0)
2545                         return t_hash_start(m, pos);
2546                 iter->flags |= FTRACE_ITER_PRINTALL;
2547                 /* reset in case of seek/pread */
2548                 iter->flags &= ~FTRACE_ITER_HASH;
2549                 return iter;
2550         }
2551
2552         if (iter->flags & FTRACE_ITER_HASH)
2553                 return t_hash_start(m, pos);
2554
2555         /*
2556          * Unfortunately, we need to restart at ftrace_pages_start
2557          * every time we let go of the ftrace_lock mutex. This is because
2558          * those pointers can change without the lock.
2559          */
2560         iter->pg = ftrace_pages_start;
2561         iter->idx = 0;
2562         for (l = 0; l <= *pos; ) {
2563                 p = t_next(m, p, &l);
2564                 if (!p)
2565                         break;
2566         }
2567
2568         if (!p)
2569                 return t_hash_start(m, pos);
2570
2571         return iter;
2572 }
2573
2574 static void t_stop(struct seq_file *m, void *p)
2575 {
2576         mutex_unlock(&ftrace_lock);
2577 }
2578
2579 static int t_show(struct seq_file *m, void *v)
2580 {
2581         struct ftrace_iterator *iter = m->private;
2582         struct dyn_ftrace *rec;
2583
2584         if (iter->flags & FTRACE_ITER_HASH)
2585                 return t_hash_show(m, iter);
2586
2587         if (iter->flags & FTRACE_ITER_PRINTALL) {
2588                 seq_printf(m, "#### all functions enabled ####\n");
2589                 return 0;
2590         }
2591
2592         rec = iter->func;
2593
2594         if (!rec)
2595                 return 0;
2596
2597         seq_printf(m, "%ps", (void *)rec->ip);
2598         if (iter->flags & FTRACE_ITER_ENABLED)
2599                 seq_printf(m, " (%ld)%s",
2600                            rec->flags & ~FTRACE_FL_MASK,
2601                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2602         seq_printf(m, "\n");
2603
2604         return 0;
2605 }
2606
2607 static const struct seq_operations show_ftrace_seq_ops = {
2608         .start = t_start,
2609         .next = t_next,
2610         .stop = t_stop,
2611         .show = t_show,
2612 };
2613
2614 static int
2615 ftrace_avail_open(struct inode *inode, struct file *file)
2616 {
2617         struct ftrace_iterator *iter;
2618
2619         if (unlikely(ftrace_disabled))
2620                 return -ENODEV;
2621
2622         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2623         if (iter) {
2624                 iter->pg = ftrace_pages_start;
2625                 iter->ops = &global_ops;
2626         }
2627
2628         return iter ? 0 : -ENOMEM;
2629 }
2630
2631 static int
2632 ftrace_enabled_open(struct inode *inode, struct file *file)
2633 {
2634         struct ftrace_iterator *iter;
2635
2636         if (unlikely(ftrace_disabled))
2637                 return -ENODEV;
2638
2639         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2640         if (iter) {
2641                 iter->pg = ftrace_pages_start;
2642                 iter->flags = FTRACE_ITER_ENABLED;
2643                 iter->ops = &global_ops;
2644         }
2645
2646         return iter ? 0 : -ENOMEM;
2647 }
2648
2649 static void ftrace_filter_reset(struct ftrace_hash *hash)
2650 {
2651         mutex_lock(&ftrace_lock);
2652         ftrace_hash_clear(hash);
2653         mutex_unlock(&ftrace_lock);
2654 }
2655
2656 /**
2657  * ftrace_regex_open - initialize function tracer filter files
2658  * @ops: The ftrace_ops that hold the hash filters
2659  * @flag: The type of filter to process
2660  * @inode: The inode, usually passed in to your open routine
2661  * @file: The file, usually passed in to your open routine
2662  *
2663  * ftrace_regex_open() initializes the filter files for the
2664  * @ops. Depending on @flag it may process the filter hash or
2665  * the notrace hash of @ops. With this called from the open
2666  * routine, you can use ftrace_filter_write() for the write
2667  * routine if @flag has FTRACE_ITER_FILTER set, or
2668  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2669  * ftrace_filter_lseek() should be used as the lseek routine, and
2670  * release must call ftrace_regex_release().
2671  */
2672 int
2673 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2674                   struct inode *inode, struct file *file)
2675 {
2676         struct ftrace_iterator *iter;
2677         struct ftrace_hash *hash;
2678         int ret = 0;
2679
2680         ftrace_ops_init(ops);
2681
2682         if (unlikely(ftrace_disabled))
2683                 return -ENODEV;
2684
2685         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2686         if (!iter)
2687                 return -ENOMEM;
2688
2689         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2690                 kfree(iter);
2691                 return -ENOMEM;
2692         }
2693
2694         iter->ops = ops;
2695         iter->flags = flag;
2696
2697         mutex_lock(&ops->regex_lock);
2698
2699         if (flag & FTRACE_ITER_NOTRACE)
2700                 hash = ops->notrace_hash;
2701         else
2702                 hash = ops->filter_hash;
2703
2704         if (file->f_mode & FMODE_WRITE) {
2705                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2706                 if (!iter->hash) {
2707                         trace_parser_put(&iter->parser);
2708                         kfree(iter);
2709                         ret = -ENOMEM;
2710                         goto out_unlock;
2711                 }
2712         }
2713
2714         if ((file->f_mode & FMODE_WRITE) &&
2715             (file->f_flags & O_TRUNC))
2716                 ftrace_filter_reset(iter->hash);
2717
2718         if (file->f_mode & FMODE_READ) {
2719                 iter->pg = ftrace_pages_start;
2720
2721                 ret = seq_open(file, &show_ftrace_seq_ops);
2722                 if (!ret) {
2723                         struct seq_file *m = file->private_data;
2724                         m->private = iter;
2725                 } else {
2726                         /* Failed */
2727                         free_ftrace_hash(iter->hash);
2728                         trace_parser_put(&iter->parser);
2729                         kfree(iter);
2730                 }
2731         } else
2732                 file->private_data = iter;
2733
2734  out_unlock:
2735         mutex_unlock(&ops->regex_lock);
2736
2737         return ret;
2738 }
2739
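/*
 * Sketch of the wiring described above (it mirrors the filter fops
 * defined later in this file):
 *
 *      static const struct file_operations my_filter_fops = {
 *              .open    = ftrace_filter_open,
 *              .read    = seq_read,
 *              .write   = ftrace_filter_write,
 *              .llseek  = ftrace_filter_lseek,
 *              .release = ftrace_regex_release,
 *      };
 */
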
2740 static int
2741 ftrace_filter_open(struct inode *inode, struct file *file)
2742 {
2743         return ftrace_regex_open(&global_ops,
2744                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2745                         inode, file);
2746 }
2747
2748 static int
2749 ftrace_notrace_open(struct inode *inode, struct file *file)
2750 {
2751         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2752                                  inode, file);
2753 }
2754
2755 static int ftrace_match(char *str, char *regex, int len, int type)
2756 {
2757         int matched = 0;
2758         int slen;
2759
2760         switch (type) {
2761         case MATCH_FULL:
2762                 if (strcmp(str, regex) == 0)
2763                         matched = 1;
2764                 break;
2765         case MATCH_FRONT_ONLY:
2766                 if (strncmp(str, regex, len) == 0)
2767                         matched = 1;
2768                 break;
2769         case MATCH_MIDDLE_ONLY:
2770                 if (strstr(str, regex))
2771                         matched = 1;
2772                 break;
2773         case MATCH_END_ONLY:
2774                 slen = strlen(str);
2775                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2776                         matched = 1;
2777                 break;
2778         }
2779
2780         return matched;
2781 }
2782
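/*
 * Illustrative patterns (filter_parse_regex() derives the match type
 * and stripped regex from the user's glob before ftrace_match() runs):
 *
 *      "sched_switch"  ->  MATCH_FULL
 *      "sched_*"       ->  MATCH_FRONT_ONLY,  regex = "sched_"
 *      "*_switch"      ->  MATCH_END_ONLY,    regex = "_switch"
 *      "*sched*"       ->  MATCH_MIDDLE_ONLY, regex = "sched"
 */
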
2783 static int
2784 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2785 {
2786         struct ftrace_func_entry *entry;
2787         int ret = 0;
2788
2789         entry = ftrace_lookup_ip(hash, rec->ip);
2790         if (not) {
2791                 /* Do nothing if it doesn't exist */
2792                 if (!entry)
2793                         return 0;
2794
2795                 free_hash_entry(hash, entry);
2796         } else {
2797                 /* Do nothing if it exists */
2798                 if (entry)
2799                         return 0;
2800
2801                 ret = add_hash_entry(hash, rec->ip);
2802         }
2803         return ret;
2804 }
2805
2806 static int
2807 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2808                     char *regex, int len, int type)
2809 {
2810         char str[KSYM_SYMBOL_LEN];
2811         char *modname;
2812
2813         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2814
2815         if (mod) {
2816                 /* module lookup requires matching the module */
2817                 if (!modname || strcmp(modname, mod))
2818                         return 0;
2819
2820                 /* blank search means to match all funcs in the mod */
2821                 if (!len)
2822                         return 1;
2823         }
2824
2825         return ftrace_match(str, regex, len, type);
2826 }
2827
2828 static int
2829 match_records(struct ftrace_hash *hash, char *buff,
2830               int len, char *mod, int not)
2831 {
2832         unsigned search_len = 0;
2833         struct ftrace_page *pg;
2834         struct dyn_ftrace *rec;
2835         int type = MATCH_FULL;
2836         char *search = buff;
2837         int found = 0;
2838         int ret;
2839
2840         if (len) {
2841                 type = filter_parse_regex(buff, len, &search, &not);
2842                 search_len = strlen(search);
2843         }
2844
2845         mutex_lock(&ftrace_lock);
2846
2847         if (unlikely(ftrace_disabled))
2848                 goto out_unlock;
2849
2850         do_for_each_ftrace_rec(pg, rec) {
2851                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2852                         ret = enter_record(hash, rec, not);
2853                         if (ret < 0) {
2854                                 found = ret;
2855                                 goto out_unlock;
2856                         }
2857                         found = 1;
2858                 }
2859         } while_for_each_ftrace_rec();
2860  out_unlock:
2861         mutex_unlock(&ftrace_lock);
2862
2863         return found;
2864 }
2865
2866 static int
2867 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2868 {
2869         return match_records(hash, buff, len, NULL, 0);
2870 }
2871
2872 static int
2873 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2874 {
2875         int not = 0;
2876
2877         /* blank or '*' mean the same */
2878         if (strcmp(buff, "*") == 0)
2879                 buff[0] = 0;
2880
2881         /* handle the case of 'don't filter this module' */
2882         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2883                 buff[0] = 0;
2884                 not = 1;
2885         }
2886
2887         return match_records(hash, buff, strlen(buff), mod, not);
2888 }
2889
2890 /*
2891  * We register the module command as a template to show others how
2892  * to register a command as well.
2893  */
2894
2895 static int
2896 ftrace_mod_callback(struct ftrace_hash *hash,
2897                     char *func, char *cmd, char *param, int enable)
2898 {
2899         char *mod;
2900         int ret = -EINVAL;
2901
2902         /*
2903          * cmd == 'mod' because we only registered this func
2904          * for the 'mod' ftrace_func_command.
2905          * But if you register one func with multiple commands,
2906          * you can tell which command was used by the cmd
2907          * parameter.
2908          */
2909
2910         /* we must have a module name */
2911         if (!param)
2912                 return ret;
2913
2914         mod = strsep(&param, ":");
2915         if (!strlen(mod))
2916                 return ret;
2917
2918         ret = ftrace_match_module_records(hash, func, mod);
2919         if (!ret)
2920                 ret = -EINVAL;
2921         if (ret < 0)
2922                 return ret;
2923
2924         return 0;
2925 }
2926
2927 static struct ftrace_func_command ftrace_mod_cmd = {
2928         .name                   = "mod",
2929         .func                   = ftrace_mod_callback,
2930 };
2931
2932 static int __init ftrace_mod_cmd_init(void)
2933 {
2934         return register_ftrace_command(&ftrace_mod_cmd);
2935 }
2936 core_initcall(ftrace_mod_cmd_init);
2937
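/*
 * Userspace view of the command above (assumes debugfs is mounted at
 * /sys/kernel/debug): limit the filter to one module's functions with
 *
 *      echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 */
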
2938 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2939                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2940 {
2941         struct ftrace_func_probe *entry;
2942         struct hlist_head *hhd;
2943         unsigned long key;
2944
2945         key = hash_long(ip, FTRACE_HASH_BITS);
2946
2947         hhd = &ftrace_func_hash[key];
2948
2949         if (hlist_empty(hhd))
2950                 return;
2951
2952         /*
2953          * Disable preemption for these calls to prevent an RCU grace
2954          * period. This syncs the hash iteration and freeing of items
2955          * on the hash. rcu_read_lock is too dangerous here.
2956          */
2957         preempt_disable_notrace();
2958         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2959                 if (entry->ip == ip)
2960                         entry->ops->func(ip, parent_ip, &entry->data);
2961         }
2962         preempt_enable_notrace();
2963 }
2964
2965 static struct ftrace_ops trace_probe_ops __read_mostly =
2966 {
2967         .func           = function_trace_probe_call,
2968         .flags          = FTRACE_OPS_FL_INITIALIZED,
2969         INIT_REGEX_LOCK(trace_probe_ops)
2970 };
2971
2972 static int ftrace_probe_registered;
2973
2974 static void __enable_ftrace_function_probe(void)
2975 {
2976         int ret;
2977         int i;
2978
2979         if (ftrace_probe_registered) {
2980                 /* still need to update the function call sites */
2981                 if (ftrace_enabled)
2982                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2983                 return;
2984         }
2985
2986         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2987                 struct hlist_head *hhd = &ftrace_func_hash[i];
2988                 if (hhd->first)
2989                         break;
2990         }
2991         /* Nothing registered? */
2992         if (i == FTRACE_FUNC_HASHSIZE)
2993                 return;
2994
2995         ret = __register_ftrace_function(&trace_probe_ops);
2996         if (!ret)
2997                 ret = ftrace_startup(&trace_probe_ops, 0);
2998
2999         ftrace_probe_registered = 1;
3000 }
3001
3002 static void __disable_ftrace_function_probe(void)
3003 {
3004         int ret;
3005         int i;
3006
3007         if (!ftrace_probe_registered)
3008                 return;
3009
3010         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3011                 struct hlist_head *hhd = &ftrace_func_hash[i];
3012                 if (hhd->first)
3013                         return;
3014         }
3015
3016         /* no more funcs left */
3017         ret = __unregister_ftrace_function(&trace_probe_ops);
3018         if (!ret)
3019                 ftrace_shutdown(&trace_probe_ops, 0);
3020
3021         ftrace_probe_registered = 0;
3022 }
3023
3024
3025 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3026 {
3027         if (entry->ops->free)
3028                 entry->ops->free(entry->ops, entry->ip, &entry->data);
3029         kfree(entry);
3030 }
3031
3032 int
3033 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3034                               void *data)
3035 {
3036         struct ftrace_func_probe *entry;
3037         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3038         struct ftrace_hash *hash;
3039         struct ftrace_page *pg;
3040         struct dyn_ftrace *rec;
3041         int type, len, not;
3042         unsigned long key;
3043         int count = 0;
3044         char *search;
3045         int ret;
3046
3047         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3048         len = strlen(search);
3049
3050         /* we do not support '!' for function probes */
3051         if (WARN_ON(not))
3052                 return -EINVAL;
3053
3054         mutex_lock(&trace_probe_ops.regex_lock);
3055
3056         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3057         if (!hash) {
3058                 count = -ENOMEM;
3059                 goto out;
3060         }
3061
3062         if (unlikely(ftrace_disabled)) {
3063                 count = -ENODEV;
3064                 goto out;
3065         }
3066
3067         mutex_lock(&ftrace_lock);
3068
3069         do_for_each_ftrace_rec(pg, rec) {
3070
3071                 if (!ftrace_match_record(rec, NULL, search, len, type))
3072                         continue;
3073
3074                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3075                 if (!entry) {
3076                         /* If we did not process any, then return error */
3077                         if (!count)
3078                                 count = -ENOMEM;
3079                         goto out_unlock;
3080                 }
3081
3082                 count++;
3083
3084                 entry->data = data;
3085
3086                 /*
3087                  * The caller might want to do something special
3088                  * for each function we find. We call the callback
3089                  * to give the caller an opportunity to do so.
3090                  */
3091                 if (ops->init) {
3092                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3093                                 /* caller does not like this func */
3094                                 kfree(entry);
3095                                 continue;
3096                         }
3097                 }
3098
3099                 ret = enter_record(hash, rec, 0);
3100                 if (ret < 0) {
3101                         kfree(entry);
3102                         count = ret;
3103                         goto out_unlock;
3104                 }
3105
3106                 entry->ops = ops;
3107                 entry->ip = rec->ip;
3108
3109                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3110                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3111
3112         } while_for_each_ftrace_rec();
3113
3114         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3115         if (ret < 0)
3116                 count = ret;
3117
3118         __enable_ftrace_function_probe();
3119
3120  out_unlock:
3121         mutex_unlock(&ftrace_lock);
3122  out:
3123         mutex_unlock(&trace_probe_ops.regex_lock);
3124         free_ftrace_hash(hash);
3125
3126         return count;
3127 }
3128
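/*
 * Registration sketch (hypothetical probe; in-tree users include the
 * traceon/traceoff probes in trace_functions.c). The callback signature
 * matches the invocation in function_trace_probe_call() above:
 *
 *      static void my_probe(unsigned long ip, unsigned long parent_ip,
 *                           void **data)
 *      {
 *              ... runs each time a matched function is hit ...
 *      }
 *
 *      static struct ftrace_probe_ops my_probe_ops = {
 *              .func = my_probe,
 *      };
 *
 *      register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */
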
3129 enum {
3130         PROBE_TEST_FUNC         = 1,
3131         PROBE_TEST_DATA         = 2
3132 };
3133
3134 static void
3135 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3136                                   void *data, int flags)
3137 {
3138         struct ftrace_func_entry *rec_entry;
3139         struct ftrace_func_probe *entry;
3140         struct ftrace_func_probe *p;
3141         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3142         struct list_head free_list;
3143         struct ftrace_hash *hash;
3144         struct hlist_node *tmp;
3145         char str[KSYM_SYMBOL_LEN];
3146         int type = MATCH_FULL;
3147         int i, len = 0;
3148         char *search;
3149
3150         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3151                 glob = NULL;
3152         else if (glob) {
3153                 int not;
3154
3155                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3156                 len = strlen(search);
3157
3158                 /* we do not support '!' for function probes */
3159                 if (WARN_ON(not))
3160                         return;
3161         }
3162
3163         mutex_lock(&trace_probe_ops.regex_lock);
3164
3165         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3166         if (!hash)
3167                 /* Hmm, should report this somehow */
3168                 goto out_unlock;
3169
3170         INIT_LIST_HEAD(&free_list);
3171
3172         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3173                 struct hlist_head *hhd = &ftrace_func_hash[i];
3174
3175                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3176
3177                         /* break up if statements for readability */
3178                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3179                                 continue;
3180
3181                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3182                                 continue;
3183
3184                         /* do this last, since it is the most expensive */
3185                         if (glob) {
3186                                 kallsyms_lookup(entry->ip, NULL, NULL,
3187                                                 NULL, str);
3188                                 if (!ftrace_match(str, glob, len, type))
3189                                         continue;
3190                         }
3191
3192                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3193                         /* It is possible more than one entry had this ip */
3194                         if (rec_entry)
3195                                 free_hash_entry(hash, rec_entry);
3196
3197                         hlist_del_rcu(&entry->node);
3198                         list_add(&entry->free_list, &free_list);
3199                 }
3200         }
3201         mutex_lock(&ftrace_lock);
3202         __disable_ftrace_function_probe();
3203         /*
3204          * Remove after the disable is called. Otherwise, if the last
3205          * probe is removed, a null hash means *all enabled*.
3206          */
3207         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3208         synchronize_sched();
3209         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3210                 list_del(&entry->free_list);
3211                 ftrace_free_entry(entry);
3212         }
3213         mutex_unlock(&ftrace_lock);
3214
3215  out_unlock:
3216         mutex_unlock(&trace_probe_ops.regex_lock);
3217         free_ftrace_hash(hash);
3218 }
3219
3220 void
3221 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3222                                 void *data)
3223 {
3224         __unregister_ftrace_function_probe(glob, ops, data,
3225                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3226 }
3227
3228 void
3229 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3230 {
3231         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3232 }
3233
3234 void unregister_ftrace_function_probe_all(char *glob)
3235 {
3236         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3237 }
3238
3239 static LIST_HEAD(ftrace_commands);
3240 static DEFINE_MUTEX(ftrace_cmd_mutex);
3241
3242 int register_ftrace_command(struct ftrace_func_command *cmd)
3243 {
3244         struct ftrace_func_command *p;
3245         int ret = 0;
3246
3247         mutex_lock(&ftrace_cmd_mutex);
3248         list_for_each_entry(p, &ftrace_commands, list) {
3249                 if (strcmp(cmd->name, p->name) == 0) {
3250                         ret = -EBUSY;
3251                         goto out_unlock;
3252                 }
3253         }
3254         list_add(&cmd->list, &ftrace_commands);
3255  out_unlock:
3256         mutex_unlock(&ftrace_cmd_mutex);
3257
3258         return ret;
3259 }
3260
3261 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3262 {
3263         struct ftrace_func_command *p, *n;
3264         int ret = -ENODEV;
3265
3266         mutex_lock(&ftrace_cmd_mutex);
3267         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3268                 if (strcmp(cmd->name, p->name) == 0) {
3269                         ret = 0;
3270                         list_del_init(&p->list);
3271                         goto out_unlock;
3272                 }
3273         }
3274  out_unlock:
3275         mutex_unlock(&ftrace_cmd_mutex);
3276
3277         return ret;
3278 }
3279
3280 static int ftrace_process_regex(struct ftrace_hash *hash,
3281                                 char *buff, int len, int enable)
3282 {
3283         char *func, *command, *next = buff;
3284         struct ftrace_func_command *p;
3285         int ret = -EINVAL;
3286
3287         func = strsep(&next, ":");
3288
3289         if (!next) {
3290                 ret = ftrace_match_records(hash, func, len);
3291                 if (!ret)
3292                         ret = -EINVAL;
3293                 if (ret < 0)
3294                         return ret;
3295                 return 0;
3296         }
3297
3298         /* command found */
3299
3300         command = strsep(&next, ":");
3301
3302         mutex_lock(&ftrace_cmd_mutex);
3303         list_for_each_entry(p, &ftrace_commands, list) {
3304                 if (strcmp(p->name, command) == 0) {
3305                         ret = p->func(hash, func, command, next, enable);
3306                         goto out_unlock;
3307                 }
3308         }
3309  out_unlock:
3310         mutex_unlock(&ftrace_cmd_mutex);
3311
3312         return ret;
3313 }
3314
3315 static ssize_t
3316 ftrace_regex_write(struct file *file, const char __user *ubuf,
3317                    size_t cnt, loff_t *ppos, int enable)
3318 {
3319         struct ftrace_iterator *iter;
3320         struct trace_parser *parser;
3321         ssize_t ret, read;
3322
3323         if (!cnt)
3324                 return 0;
3325
3326         if (file->f_mode & FMODE_READ) {
3327                 struct seq_file *m = file->private_data;
3328                 iter = m->private;
3329         } else
3330                 iter = file->private_data;
3331
3332         if (unlikely(ftrace_disabled))
3333                 return -ENODEV;
3334
3335         /* iter->hash is a local copy, so we don't need regex_lock */
3336
3337         parser = &iter->parser;
3338         read = trace_get_user(parser, ubuf, cnt, ppos);
3339
3340         if (read >= 0 && trace_parser_loaded(parser) &&
3341             !trace_parser_cont(parser)) {
3342                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3343                                            parser->idx, enable);
3344                 trace_parser_clear(parser);
3345                 if (ret < 0)
3346                         goto out;
3347         }
3348
3349         ret = read;
3350  out:
3351         return ret;
3352 }
3353
3354 ssize_t
3355 ftrace_filter_write(struct file *file, const char __user *ubuf,
3356                     size_t cnt, loff_t *ppos)
3357 {
3358         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3359 }
3360
3361 ssize_t
3362 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3363                      size_t cnt, loff_t *ppos)
3364 {
3365         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3366 }
3367
3368 static int
3369 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3370 {
3371         struct ftrace_func_entry *entry;
3372
3373         if (!ftrace_location(ip))
3374                 return -EINVAL;
3375
3376         if (remove) {
3377                 entry = ftrace_lookup_ip(hash, ip);
3378                 if (!entry)
3379                         return -ENOENT;
3380                 free_hash_entry(hash, entry);
3381                 return 0;
3382         }
3383
3384         return add_hash_entry(hash, ip);
3385 }
3386
3387 static int
3388 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3389                 unsigned long ip, int remove, int reset, int enable)
3390 {
3391         struct ftrace_hash **orig_hash;
3392         struct ftrace_hash *hash;
3393         int ret;
3394
3395         /* All global ops use the global ops filters */
3396         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3397                 ops = &global_ops;
3398
3399         if (unlikely(ftrace_disabled))
3400                 return -ENODEV;
3401
3402         mutex_lock(&ops->regex_lock);
3403
3404         if (enable)
3405                 orig_hash = &ops->filter_hash;
3406         else
3407                 orig_hash = &ops->notrace_hash;
3408
3409         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3410         if (!hash) {
3411                 ret = -ENOMEM;
3412                 goto out_regex_unlock;
3413         }
3414
3415         if (reset)
3416                 ftrace_filter_reset(hash);
3417         if (buf && !ftrace_match_records(hash, buf, len)) {
3418                 ret = -EINVAL;
3419                 goto out_regex_unlock;
3420         }
3421         if (ip) {
3422                 ret = ftrace_match_addr(hash, ip, remove);
3423                 if (ret < 0)
3424                         goto out_regex_unlock;
3425         }
3426
3427         mutex_lock(&ftrace_lock);
3428         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3429         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3430             && ftrace_enabled)
3431                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3432
3433         mutex_unlock(&ftrace_lock);
3434
3435  out_regex_unlock:
3436         mutex_unlock(&ops->regex_lock);
3437
3438         free_ftrace_hash(hash);
3439         return ret;
3440 }
3441
3442 static int
3443 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3444                 int reset, int enable)
3445 {
3446         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3447 }
3448
3449 /**
3450  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3451  * @ops - the ops to set the filter with
3452  * @ip - the address to add to or remove from the filter.
3453  * @remove - non zero to remove the ip from the filter
3454  * @reset - non zero to reset all filters before applying this filter.
3455  *
3456  * Filters denote which functions should be enabled when tracing is enabled.
3457  * If @ip is zero, nothing is added to or removed from the filter.
3458  */
3459 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3460                          int remove, int reset)
3461 {
3462         ftrace_ops_init(ops);
3463         return ftrace_set_addr(ops, ip, remove, reset, 1);
3464 }
3465 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
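
/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * The my_ops variable is hypothetical, and kallsyms_lookup_name() is
 * just one way a caller might obtain a function address:
 *
 *	unsigned long addr = kallsyms_lookup_name("do_fork");
 *
 *	ftrace_set_filter_ip(&my_ops, addr, 0, 1);	// trace only addr
 *	ftrace_set_filter_ip(&my_ops, addr, 1, 0);	// remove it again
 */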
3466
3467 static int
3468 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3469                  int reset, int enable)
3470 {
3471         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3472 }
3473
3474 /**
3475  * ftrace_set_filter - set a function to filter on in ftrace
3476  * @ops - the ops to set the filter with
3477  * @buf - the string that holds the function filter text.
3478  * @len - the length of the string.
3479  * @reset - non zero to reset all filters before applying this filter.
3480  *
3481  * Filters denote which functions should be enabled when tracing is enabled.
3482  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3483  */
3484 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3485                        int len, int reset)
3486 {
3487         ftrace_ops_init(ops);
3488         return ftrace_set_regex(ops, buf, len, reset, 1);
3489 }
3490 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3491
3492 /**
3493  * ftrace_set_notrace - set a function to not trace in ftrace
3494  * @ops - the ops to set the notrace filter with
3495  * @buf - the string that holds the function notrace text.
3496  * @len - the length of the string.
3497  * @reset - non zero to reset all filters before applying this filter.
3498  *
3499  * Notrace filters denote which functions should not be enabled when tracing
3500  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3501  * for tracing.
3502  */
3503 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3504                         int len, int reset)
3505 {
3506         ftrace_ops_init(ops);
3507         return ftrace_set_regex(ops, buf, len, reset, 0);
3508 }
3509 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
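
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * combining the two calls above on a hypothetical my_ops:
 *
 *	ftrace_set_filter(&my_ops, "kmem_cache_*",
 *			  strlen("kmem_cache_*"), 1);
 *	ftrace_set_notrace(&my_ops, "kmem_cache_free",
 *			   strlen("kmem_cache_free"), 0);
 *
 * Once my_ops is registered, its func is called for every kmem_cache_*
 * function except kmem_cache_free.
 */
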
3510 /**
3511  * ftrace_set_global_filter - set a function to filter on with
3512  *                            global tracers
3513  * @buf - the string that holds the function filter text.
3514  * @len - the length of the string.
3515  * @reset - non zero to reset all filters before applying this filter.
3516  *
3517  * Filters denote which functions should be enabled when tracing is enabled.
3518  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3519  */
3520 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3521 {
3522         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3523 }
3524 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3525
3526 /**
3527  * ftrace_set_global_notrace - set a function to not trace with
3528  *                             global tracers
3529  * @buf - the string that holds the function notrace text.
3530  * @len - the length of the string.
3531  * @reset - non zero to reset all filters before applying this filter.
3532  *
3533  * Notrace filters denote which functions should not be enabled when tracing
3534  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3535  * for tracing.
3536  */
3537 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3538 {
3539         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3540 }
3541 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3542
3543 /*
3544  * command line interface to allow users to set filters on boot up.
3545  */
3546 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3547 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3548 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3549
3550 /* Used by the function selftest to skip checks when a boot-time filter is set */
3551 bool ftrace_filter_param __initdata;
3552
3553 static int __init set_ftrace_notrace(char *str)
3554 {
3555         ftrace_filter_param = true;
3556         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3557         return 1;
3558 }
3559 __setup("ftrace_notrace=", set_ftrace_notrace);
3560
3561 static int __init set_ftrace_filter(char *str)
3562 {
3563         ftrace_filter_param = true;
3564         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3565         return 1;
3566 }
3567 __setup("ftrace_filter=", set_ftrace_filter);
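
/*
 * Editor's note, illustrative and not part of the original file: these
 * filters are given on the kernel command line, e.g.
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=rcu*
 *
 * With CONFIG_FUNCTION_GRAPH_TRACER, ftrace_graph_filter= below works
 * the same way for the graph tracer.
 */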
3568
3569 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3570 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3571 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3572
3573 static int __init set_graph_function(char *str)
3574 {
3575         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3576         return 1;
3577 }
3578 __setup("ftrace_graph_filter=", set_graph_function);
3579
3580 static void __init set_ftrace_early_graph(char *buf)
3581 {
3582         int ret;
3583         char *func;
3584
3585         while (buf) {
3586                 func = strsep(&buf, ",");
3587                 /* we allow only one expression at a time */
3588                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3589                                       func);
3590                 if (ret)
3591                         printk(KERN_DEBUG "ftrace: function %s not "
3592                                           "traceable\n", func);
3593         }
3594 }
3595 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3596
3597 void __init
3598 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3599 {
3600         char *func;
3601
3602         ftrace_ops_init(ops);
3603
3604         while (buf) {
3605                 func = strsep(&buf, ",");
3606                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3607         }
3608 }
3609
3610 static void __init set_ftrace_early_filters(void)
3611 {
3612         if (ftrace_filter_buf[0])
3613                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3614         if (ftrace_notrace_buf[0])
3615                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3616 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3617         if (ftrace_graph_buf[0])
3618                 set_ftrace_early_graph(ftrace_graph_buf);
3619 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3620 }
3621
3622 int ftrace_regex_release(struct inode *inode, struct file *file)
3623 {
3624         struct seq_file *m = (struct seq_file *)file->private_data;
3625         struct ftrace_iterator *iter;
3626         struct ftrace_hash **orig_hash;
3627         struct trace_parser *parser;
3628         int filter_hash;
3629         int ret;
3630
3631         if (file->f_mode & FMODE_READ) {
3632                 iter = m->private;
3633                 seq_release(inode, file);
3634         } else
3635                 iter = file->private_data;
3636
3637         parser = &iter->parser;
3638         if (trace_parser_loaded(parser)) {
3639                 parser->buffer[parser->idx] = 0;
3640                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3641         }
3642
3643         trace_parser_put(parser);
3644
3645         mutex_lock(&iter->ops->regex_lock);
3646
3647         if (file->f_mode & FMODE_WRITE) {
3648                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3649
3650                 if (filter_hash)
3651                         orig_hash = &iter->ops->filter_hash;
3652                 else
3653                         orig_hash = &iter->ops->notrace_hash;
3654
3655                 mutex_lock(&ftrace_lock);
3656                 ret = ftrace_hash_move(iter->ops, filter_hash,
3657                                        orig_hash, iter->hash);
3658                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3659                     && ftrace_enabled)
3660                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3661
3662                 mutex_unlock(&ftrace_lock);
3663         }
3664
3665         mutex_unlock(&iter->ops->regex_lock);
3666         free_ftrace_hash(iter->hash);
3667         kfree(iter);
3668
3669         return 0;
3670 }
3671
3672 static const struct file_operations ftrace_avail_fops = {
3673         .open = ftrace_avail_open,
3674         .read = seq_read,
3675         .llseek = seq_lseek,
3676         .release = seq_release_private,
3677 };
3678
3679 static const struct file_operations ftrace_enabled_fops = {
3680         .open = ftrace_enabled_open,
3681         .read = seq_read,
3682         .llseek = seq_lseek,
3683         .release = seq_release_private,
3684 };
3685
3686 static const struct file_operations ftrace_filter_fops = {
3687         .open = ftrace_filter_open,
3688         .read = seq_read,
3689         .write = ftrace_filter_write,
3690         .llseek = ftrace_filter_lseek,
3691         .release = ftrace_regex_release,
3692 };
3693
3694 static const struct file_operations ftrace_notrace_fops = {
3695         .open = ftrace_notrace_open,
3696         .read = seq_read,
3697         .write = ftrace_notrace_write,
3698         .llseek = ftrace_filter_lseek,
3699         .release = ftrace_regex_release,
3700 };
3701
3702 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3703
3704 static DEFINE_MUTEX(graph_lock);
3705
3706 int ftrace_graph_count;
3707 int ftrace_graph_filter_enabled;
3708 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3709
3710 static void *
3711 __g_next(struct seq_file *m, loff_t *pos)
3712 {
3713         if (*pos >= ftrace_graph_count)
3714                 return NULL;
3715         return &ftrace_graph_funcs[*pos];
3716 }
3717
3718 static void *
3719 g_next(struct seq_file *m, void *v, loff_t *pos)
3720 {
3721         (*pos)++;
3722         return __g_next(m, pos);
3723 }
3724
3725 static void *g_start(struct seq_file *m, loff_t *pos)
3726 {
3727         mutex_lock(&graph_lock);
3728
3729         /* No filter set; tell g_show to report that all functions are enabled */
3730         if (!ftrace_graph_filter_enabled && !*pos)
3731                 return (void *)1;
3732
3733         return __g_next(m, pos);
3734 }
3735
3736 static void g_stop(struct seq_file *m, void *p)
3737 {
3738         mutex_unlock(&graph_lock);
3739 }
3740
3741 static int g_show(struct seq_file *m, void *v)
3742 {
3743         unsigned long *ptr = v;
3744
3745         if (!ptr)
3746                 return 0;
3747
3748         if (ptr == (unsigned long *)1) {
3749                 seq_printf(m, "#### all functions enabled ####\n");
3750                 return 0;
3751         }
3752
3753         seq_printf(m, "%ps\n", (void *)*ptr);
3754
3755         return 0;
3756 }
3757
3758 static const struct seq_operations ftrace_graph_seq_ops = {
3759         .start = g_start,
3760         .next = g_next,
3761         .stop = g_stop,
3762         .show = g_show,
3763 };
3764
3765 static int
3766 ftrace_graph_open(struct inode *inode, struct file *file)
3767 {
3768         int ret = 0;
3769
3770         if (unlikely(ftrace_disabled))
3771                 return -ENODEV;
3772
3773         mutex_lock(&graph_lock);
3774         if ((file->f_mode & FMODE_WRITE) &&
3775             (file->f_flags & O_TRUNC)) {
3776                 ftrace_graph_filter_enabled = 0;
3777                 ftrace_graph_count = 0;
3778                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3779         }
3780         mutex_unlock(&graph_lock);
3781
3782         if (file->f_mode & FMODE_READ)
3783                 ret = seq_open(file, &ftrace_graph_seq_ops);
3784
3785         return ret;
3786 }
3787
3788 static int
3789 ftrace_graph_release(struct inode *inode, struct file *file)
3790 {
3791         if (file->f_mode & FMODE_READ)
3792                 seq_release(inode, file);
3793         return 0;
3794 }
3795
3796 static int
3797 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3798 {
3799         struct dyn_ftrace *rec;
3800         struct ftrace_page *pg;
3801         int search_len;
3802         int fail = 1;
3803         int type, not;
3804         char *search;
3805         bool exists;
3806         int i;
3807
3808         /* decode regex */
3809         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3810         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3811                 return -EBUSY;
3812
3813         search_len = strlen(search);
3814
3815         mutex_lock(&ftrace_lock);
3816
3817         if (unlikely(ftrace_disabled)) {
3818                 mutex_unlock(&ftrace_lock);
3819                 return -ENODEV;
3820         }
3821
3822         do_for_each_ftrace_rec(pg, rec) {
3823
3824                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3825                         /* if it is in the array */
3826                         exists = false;
3827                         for (i = 0; i < *idx; i++) {
3828                                 if (array[i] == rec->ip) {
3829                                         exists = true;
3830                                         break;
3831                                 }
3832                         }
3833
3834                         if (!not) {
3835                                 fail = 0;
3836                                 if (!exists) {
3837                                         array[(*idx)++] = rec->ip;
3838                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3839                                                 goto out;
3840                                 }
3841                         } else {
3842                                 if (exists) {
3843                                         array[i] = array[--(*idx)];
3844                                         array[*idx] = 0;
3845                                         fail = 0;
3846                                 }
3847                         }
3848                 }
3849         } while_for_each_ftrace_rec();
3850 out:
3851         mutex_unlock(&ftrace_lock);
3852
3853         if (fail)
3854                 return -EINVAL;
3855
3856         ftrace_graph_filter_enabled = !!(*idx);
3857
3858         return 0;
3859 }
3860
3861 static ssize_t
3862 ftrace_graph_write(struct file *file, const char __user *ubuf,
3863                    size_t cnt, loff_t *ppos)
3864 {
3865         struct trace_parser parser;
3866         ssize_t read, ret;
3867
3868         if (!cnt)
3869                 return 0;
3870
3871         mutex_lock(&graph_lock);
3872
3873         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3874                 ret = -ENOMEM;
3875                 goto out_unlock;
3876         }
3877
3878         read = trace_get_user(&parser, ubuf, cnt, ppos);
3879
3880         if (read >= 0 && trace_parser_loaded(&parser)) {
3881                 parser.buffer[parser.idx] = 0;
3882
3883                 /* we allow only one expression at a time */
3884                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3885                                         parser.buffer);
3886                 if (ret)
3887                         goto out_free;
3888         }
3889
3890         ret = read;
3891
3892 out_free:
3893         trace_parser_put(&parser);
3894 out_unlock:
3895         mutex_unlock(&graph_lock);
3896
3897         return ret;
3898 }
3899
3900 static const struct file_operations ftrace_graph_fops = {
3901         .open           = ftrace_graph_open,
3902         .read           = seq_read,
3903         .write          = ftrace_graph_write,
3904         .llseek         = ftrace_filter_lseek,
3905         .release        = ftrace_graph_release,
3906 };
3907 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3908
3909 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3910 {
3911
3912         trace_create_file("available_filter_functions", 0444,
3913                         d_tracer, NULL, &ftrace_avail_fops);
3914
3915         trace_create_file("enabled_functions", 0444,
3916                         d_tracer, NULL, &ftrace_enabled_fops);
3917
3918         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3919                         NULL, &ftrace_filter_fops);
3920
3921         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3922                                     NULL, &ftrace_notrace_fops);
3923
3924 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3925         trace_create_file("set_graph_function", 0444, d_tracer,
3926                                     NULL,
3927                                     &ftrace_graph_fops);
3928 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3929
3930         return 0;
3931 }
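
/*
 * Editor's note, illustrative and not part of the original file: the
 * files created above live in the tracing directory, typically
 * /sys/kernel/debug/tracing, and are used like:
 *
 *	cat available_filter_functions
 *	echo 'vfs_*' > set_ftrace_filter
 *	echo do_IRQ > set_graph_function
 */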
3932
3933 static int ftrace_cmp_ips(const void *a, const void *b)
3934 {
3935         const unsigned long *ipa = a;
3936         const unsigned long *ipb = b;
3937
3938         if (*ipa > *ipb)
3939                 return 1;
3940         if (*ipa < *ipb)
3941                 return -1;
3942         return 0;
3943 }
3944
3945 static void ftrace_swap_ips(void *a, void *b, int size)
3946 {
3947         unsigned long *ipa = a;
3948         unsigned long *ipb = b;
3949         unsigned long t;
3950
3951         t = *ipa;
3952         *ipa = *ipb;
3953         *ipb = t;
3954 }
3955
3956 static int ftrace_process_locs(struct module *mod,
3957                                unsigned long *start,
3958                                unsigned long *end)
3959 {
3960         struct ftrace_page *start_pg;
3961         struct ftrace_page *pg;
3962         struct dyn_ftrace *rec;
3963         unsigned long count;
3964         unsigned long *p;
3965         unsigned long addr;
3966         unsigned long flags = 0; /* Shut up gcc */
3967         int ret = -ENOMEM;
3968
3969         count = end - start;
3970
3971         if (!count)
3972                 return 0;
3973
3974         sort(start, count, sizeof(*start),
3975              ftrace_cmp_ips, ftrace_swap_ips);
3976
3977         start_pg = ftrace_allocate_pages(count);
3978         if (!start_pg)
3979                 return -ENOMEM;
3980
3981         mutex_lock(&ftrace_lock);
3982
3983         /*
3984          * The core kernel and each module need their own pages,
3985          * as modules will free them when they are removed.
3986          * Force a new page to be allocated for modules.
3987          */
3988         if (!mod) {
3989                 WARN_ON(ftrace_pages || ftrace_pages_start);
3990                 /* First initialization */
3991                 ftrace_pages = ftrace_pages_start = start_pg;
3992         } else {
3993                 if (!ftrace_pages)
3994                         goto out;
3995
3996                 if (WARN_ON(ftrace_pages->next)) {
3997                         /* Hmm, we have free pages? */
3998                         while (ftrace_pages->next)
3999                                 ftrace_pages = ftrace_pages->next;
4000                 }
4001
4002                 ftrace_pages->next = start_pg;
4003         }
4004
4005         p = start;
4006         pg = start_pg;
4007         while (p < end) {
4008                 addr = ftrace_call_adjust(*p++);
4009                 /*
4010                  * Some architecture linkers will pad between
4011                  * the different mcount_loc sections of different
4012                  * object files to satisfy alignments.
4013                  * Skip any NULL pointers.
4014                  */
4015                 if (!addr)
4016                         continue;
4017
4018                 if (pg->index == pg->size) {
4019                         /* We should have allocated enough */
4020                         if (WARN_ON(!pg->next))
4021                                 break;
4022                         pg = pg->next;
4023                 }
4024
4025                 rec = &pg->records[pg->index++];
4026                 rec->ip = addr;
4027         }
4028
4029         /* We should have used all pages */
4030         WARN_ON(pg->next);
4031
4032         /* Assign the last page to ftrace_pages */
4033         ftrace_pages = pg;
4034
4035         /* These new locations need to be initialized */
4036         ftrace_new_pgs = start_pg;
4037
4038         /*
4039          * We only need to disable interrupts on start up
4040          * because we are modifying code that an interrupt
4041          * may execute, and the modification is not atomic.
4042          * But for modules, nothing runs the code we modify
4043          * until we are finished with it, and there's no
4044          * reason to cause large interrupt latencies while we do it.
4045          */
4046         if (!mod)
4047                 local_irq_save(flags);
4048         ftrace_update_code(mod);
4049         if (!mod)
4050                 local_irq_restore(flags);
4051         ret = 0;
4052  out:
4053         mutex_unlock(&ftrace_lock);
4054
4055         return ret;
4056 }
4057
4058 #ifdef CONFIG_MODULES
4059
4060 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4061
4062 void ftrace_release_mod(struct module *mod)
4063 {
4064         struct dyn_ftrace *rec;
4065         struct ftrace_page **last_pg;
4066         struct ftrace_page *pg;
4067         int order;
4068
4069         mutex_lock(&ftrace_lock);
4070
4071         if (ftrace_disabled)
4072                 goto out_unlock;
4073
4074         /*
4075          * Each module has its own ftrace_pages, remove
4076          * them from the list.
4077          */
4078         last_pg = &ftrace_pages_start;
4079         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4080                 rec = &pg->records[0];
4081                 if (within_module_core(rec->ip, mod)) {
4082                         /*
4083                          * As core pages are first, the first
4084                          * page should never be a module page.
4085                          */
4086                         if (WARN_ON(pg == ftrace_pages_start))
4087                                 goto out_unlock;
4088
4089                         /* Check if we are deleting the last page */
4090                         if (pg == ftrace_pages)
4091                                 ftrace_pages = next_to_ftrace_page(last_pg);
4092
4093                         *last_pg = pg->next;
4094                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4095                         free_pages((unsigned long)pg->records, order);
4096                         kfree(pg);
4097                 } else
4098                         last_pg = &pg->next;
4099         }
4100  out_unlock:
4101         mutex_unlock(&ftrace_lock);
4102 }
4103
4104 static void ftrace_init_module(struct module *mod,
4105                                unsigned long *start, unsigned long *end)
4106 {
4107         if (ftrace_disabled || start == end)
4108                 return;
4109         ftrace_process_locs(mod, start, end);
4110 }
4111
4112 static int ftrace_module_notify_enter(struct notifier_block *self,
4113                                       unsigned long val, void *data)
4114 {
4115         struct module *mod = data;
4116
4117         if (val == MODULE_STATE_COMING)
4118                 ftrace_init_module(mod, mod->ftrace_callsites,
4119                                    mod->ftrace_callsites +
4120                                    mod->num_ftrace_callsites);
4121         return 0;
4122 }
4123
4124 static int ftrace_module_notify_exit(struct notifier_block *self,
4125                                      unsigned long val, void *data)
4126 {
4127         struct module *mod = data;
4128
4129         if (val == MODULE_STATE_GOING)
4130                 ftrace_release_mod(mod);
4131
4132         return 0;
4133 }
4134 #else
4135 static int ftrace_module_notify_enter(struct notifier_block *self,
4136                                       unsigned long val, void *data)
4137 {
4138         return 0;
4139 }
4140 static int ftrace_module_notify_exit(struct notifier_block *self,
4141                                      unsigned long val, void *data)
4142 {
4143         return 0;
4144 }
4145 #endif /* CONFIG_MODULES */
4146
4147 struct notifier_block ftrace_module_enter_nb = {
4148         .notifier_call = ftrace_module_notify_enter,
4149         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4150 };
4151
4152 struct notifier_block ftrace_module_exit_nb = {
4153         .notifier_call = ftrace_module_notify_exit,
4154         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4155 };
4156
4157 extern unsigned long __start_mcount_loc[];
4158 extern unsigned long __stop_mcount_loc[];
4159
4160 void __init ftrace_init(void)
4161 {
4162         unsigned long count, addr, flags;
4163         int ret;
4164
4165         /* Keep the ftrace pointer to the stub */
4166         addr = (unsigned long)ftrace_stub;
4167
4168         local_irq_save(flags);
4169         ftrace_dyn_arch_init(&addr);
4170         local_irq_restore(flags);
4171
4172         /* ftrace_dyn_arch_init places the return code in addr */
4173         if (addr)
4174                 goto failed;
4175
4176         count = __stop_mcount_loc - __start_mcount_loc;
4177
4178         ret = ftrace_dyn_table_alloc(count);
4179         if (ret)
4180                 goto failed;
4181
4182         last_ftrace_enabled = ftrace_enabled = 1;
4183
4184         ret = ftrace_process_locs(NULL,
4185                                   __start_mcount_loc,
4186                                   __stop_mcount_loc);
4187
4188         ret = register_module_notifier(&ftrace_module_enter_nb);
4189         if (ret)
4190                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4191
4192         ret = register_module_notifier(&ftrace_module_exit_nb);
4193         if (ret)
4194                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4195
4196         set_ftrace_early_filters();
4197
4198         return;
4199  failed:
4200         ftrace_disabled = 1;
4201 }
4202
4203 #else
4204
4205 static struct ftrace_ops global_ops = {
4206         .func                   = ftrace_stub,
4207         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4208         INIT_REGEX_LOCK(global_ops)
4209 };
4210
4211 static int __init ftrace_nodyn_init(void)
4212 {
4213         ftrace_enabled = 1;
4214         return 0;
4215 }
4216 core_initcall(ftrace_nodyn_init);
4217
4218 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4219 static inline void ftrace_startup_enable(int command) { }
4220 /* Keep as macros so we do not need to define the commands */
4221 # define ftrace_startup(ops, command)                   \
4222         ({                                              \
4223                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4224                 0;                                      \
4225         })
4226 # define ftrace_shutdown(ops, command)  do { } while (0)
4227 # define ftrace_startup_sysctl()        do { } while (0)
4228 # define ftrace_shutdown_sysctl()       do { } while (0)
4229
4230 static inline int
4231 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4232 {
4233         return 1;
4234 }
4235
4236 #endif /* CONFIG_DYNAMIC_FTRACE */
4237
4238 static void
4239 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4240                         struct ftrace_ops *op, struct pt_regs *regs)
4241 {
4242         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4243                 return;
4244
4245         /*
4246          * Some of the ops may be dynamically allocated,
4247          * they must be freed after a synchronize_sched().
4248          */
4249         preempt_disable_notrace();
4250         trace_recursion_set(TRACE_CONTROL_BIT);
4251         do_for_each_ftrace_op(op, ftrace_control_list) {
4252                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4253                     !ftrace_function_local_disabled(op) &&
4254                     ftrace_ops_test(op, ip, regs))
4255                         op->func(ip, parent_ip, op, regs);
4256         } while_for_each_ftrace_op(op);
4257         trace_recursion_clear(TRACE_CONTROL_BIT);
4258         preempt_enable_notrace();
4259 }
4260
4261 static struct ftrace_ops control_ops = {
4262         .func   = ftrace_ops_control_func,
4263         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4264         INIT_REGEX_LOCK(control_ops)
4265 };
4266
4267 static inline void
4268 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4269                        struct ftrace_ops *ignored, struct pt_regs *regs)
4270 {
4271         struct ftrace_ops *op;
4272         int bit;
4273
4274         if (function_trace_stop)
4275                 return;
4276
4277         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4278         if (bit < 0)
4279                 return;
4280
4281         /*
4282          * Some of the ops may be dynamically allocated,
4283          * they must be freed after a synchronize_sched().
4284          */
4285         preempt_disable_notrace();
4286         do_for_each_ftrace_op(op, ftrace_ops_list) {
4287                 if (ftrace_ops_test(op, ip, regs))
4288                         op->func(ip, parent_ip, op, regs);
4289         } while_for_each_ftrace_op(op);
4290         preempt_enable_notrace();
4291         trace_clear_recursion(bit);
4292 }
4293
4294 /*
4295  * Some archs only support passing ip and parent_ip. Even though
4296  * the list function ignores the op parameter, we do not want any
4297  * C side effects, where a function is called without the caller
4298  * sending a third parameter.
4299  * Archs are expected to support both regs and ftrace_ops at the same
4300  * time. If an arch supports ftrace_ops, it is assumed to support regs.
4301  * If callbacks want to use regs, they must either check for regs
4302  * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4303  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4304  * An architecture can pass partial regs with ftrace_ops and still
4305  * set ARCH_SUPPORTS_FTRACE_OPS.
4306  */
4307 #if ARCH_SUPPORTS_FTRACE_OPS
4308 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4309                                  struct ftrace_ops *op, struct pt_regs *regs)
4310 {
4311         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4312 }
4313 #else
4314 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4315 {
4316         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4317 }
4318 #endif
4319
4320 static void clear_ftrace_swapper(void)
4321 {
4322         struct task_struct *p;
4323         int cpu;
4324
4325         get_online_cpus();
4326         for_each_online_cpu(cpu) {
4327                 p = idle_task(cpu);
4328                 clear_tsk_trace_trace(p);
4329         }
4330         put_online_cpus();
4331 }
4332
4333 static void set_ftrace_swapper(void)
4334 {
4335         struct task_struct *p;
4336         int cpu;
4337
4338         get_online_cpus();
4339         for_each_online_cpu(cpu) {
4340                 p = idle_task(cpu);
4341                 set_tsk_trace_trace(p);
4342         }
4343         put_online_cpus();
4344 }
4345
4346 static void clear_ftrace_pid(struct pid *pid)
4347 {
4348         struct task_struct *p;
4349
4350         rcu_read_lock();
4351         do_each_pid_task(pid, PIDTYPE_PID, p) {
4352                 clear_tsk_trace_trace(p);
4353         } while_each_pid_task(pid, PIDTYPE_PID, p);
4354         rcu_read_unlock();
4355
4356         put_pid(pid);
4357 }
4358
4359 static void set_ftrace_pid(struct pid *pid)
4360 {
4361         struct task_struct *p;
4362
4363         rcu_read_lock();
4364         do_each_pid_task(pid, PIDTYPE_PID, p) {
4365                 set_tsk_trace_trace(p);
4366         } while_each_pid_task(pid, PIDTYPE_PID, p);
4367         rcu_read_unlock();
4368 }
4369
4370 static void clear_ftrace_pid_task(struct pid *pid)
4371 {
4372         if (pid == ftrace_swapper_pid)
4373                 clear_ftrace_swapper();
4374         else
4375                 clear_ftrace_pid(pid);
4376 }
4377
4378 static void set_ftrace_pid_task(struct pid *pid)
4379 {
4380         if (pid == ftrace_swapper_pid)
4381                 set_ftrace_swapper();
4382         else
4383                 set_ftrace_pid(pid);
4384 }
4385
4386 static int ftrace_pid_add(int p)
4387 {
4388         struct pid *pid;
4389         struct ftrace_pid *fpid;
4390         int ret = -EINVAL;
4391
4392         mutex_lock(&ftrace_lock);
4393
4394         if (!p)
4395                 pid = ftrace_swapper_pid;
4396         else
4397                 pid = find_get_pid(p);
4398
4399         if (!pid)
4400                 goto out;
4401
4402         ret = 0;
4403
4404         list_for_each_entry(fpid, &ftrace_pids, list)
4405                 if (fpid->pid == pid)
4406                         goto out_put;
4407
4408         ret = -ENOMEM;
4409
4410         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4411         if (!fpid)
4412                 goto out_put;
4413
4414         list_add(&fpid->list, &ftrace_pids);
4415         fpid->pid = pid;
4416
4417         set_ftrace_pid_task(pid);
4418
4419         ftrace_update_pid_func();
4420         ftrace_startup_enable(0);
4421
4422         mutex_unlock(&ftrace_lock);
4423         return 0;
4424
4425 out_put:
4426         if (pid != ftrace_swapper_pid)
4427                 put_pid(pid);
4428
4429 out:
4430         mutex_unlock(&ftrace_lock);
4431         return ret;
4432 }
4433
4434 static void ftrace_pid_reset(void)
4435 {
4436         struct ftrace_pid *fpid, *safe;
4437
4438         mutex_lock(&ftrace_lock);
4439         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4440                 struct pid *pid = fpid->pid;
4441
4442                 clear_ftrace_pid_task(pid);
4443
4444                 list_del(&fpid->list);
4445                 kfree(fpid);
4446         }
4447
4448         ftrace_update_pid_func();
4449         ftrace_startup_enable(0);
4450
4451         mutex_unlock(&ftrace_lock);
4452 }
4453
4454 static void *fpid_start(struct seq_file *m, loff_t *pos)
4455 {
4456         mutex_lock(&ftrace_lock);
4457
4458         if (list_empty(&ftrace_pids) && (!*pos))
4459                 return (void *) 1;
4460
4461         return seq_list_start(&ftrace_pids, *pos);
4462 }
4463
4464 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4465 {
4466         if (v == (void *)1)
4467                 return NULL;
4468
4469         return seq_list_next(v, &ftrace_pids, pos);
4470 }
4471
4472 static void fpid_stop(struct seq_file *m, void *p)
4473 {
4474         mutex_unlock(&ftrace_lock);
4475 }
4476
4477 static int fpid_show(struct seq_file *m, void *v)
4478 {
4479         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4480
4481         if (v == (void *)1) {
4482                 seq_printf(m, "no pid\n");
4483                 return 0;
4484         }
4485
4486         if (fpid->pid == ftrace_swapper_pid)
4487                 seq_printf(m, "swapper tasks\n");
4488         else
4489                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4490
4491         return 0;
4492 }
4493
4494 static const struct seq_operations ftrace_pid_sops = {
4495         .start = fpid_start,
4496         .next = fpid_next,
4497         .stop = fpid_stop,
4498         .show = fpid_show,
4499 };
4500
4501 static int
4502 ftrace_pid_open(struct inode *inode, struct file *file)
4503 {
4504         int ret = 0;
4505
4506         if ((file->f_mode & FMODE_WRITE) &&
4507             (file->f_flags & O_TRUNC))
4508                 ftrace_pid_reset();
4509
4510         if (file->f_mode & FMODE_READ)
4511                 ret = seq_open(file, &ftrace_pid_sops);
4512
4513         return ret;
4514 }
4515
4516 static ssize_t
4517 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4518                    size_t cnt, loff_t *ppos)
4519 {
4520         char buf[64], *tmp;
4521         long val;
4522         int ret;
4523
4524         if (cnt >= sizeof(buf))
4525                 return -EINVAL;
4526
4527         if (copy_from_user(&buf, ubuf, cnt))
4528                 return -EFAULT;
4529
4530         buf[cnt] = 0;
4531
4532         /*
4533          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4534          * to clean the filter quietly.
4535          */
4536         tmp = strstrip(buf);
4537         if (strlen(tmp) == 0)
4538                 return 1;
4539
4540         ret = kstrtol(tmp, 10, &val);
4541         if (ret < 0)
4542                 return ret;
4543
4544         ret = ftrace_pid_add(val);
4545
4546         return ret ? ret : cnt;
4547 }
4548
4549 static int
4550 ftrace_pid_release(struct inode *inode, struct file *file)
4551 {
4552         if (file->f_mode & FMODE_READ)
4553                 seq_release(inode, file);
4554
4555         return 0;
4556 }
4557
4558 static const struct file_operations ftrace_pid_fops = {
4559         .open           = ftrace_pid_open,
4560         .write          = ftrace_pid_write,
4561         .read           = seq_read,
4562         .llseek         = ftrace_filter_lseek,
4563         .release        = ftrace_pid_release,
4564 };
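
/*
 * Editor's note, illustrative and not part of the original file: typical
 * use of the file wired up above, from the tracing directory:
 *
 *	echo 1234 > set_ftrace_pid	# trace only PID 1234
 *	echo 0 > set_ftrace_pid		# trace the idle (swapper) tasks
 *	echo > set_ftrace_pid		# clear the PID filter
 */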
4565
4566 static __init int ftrace_init_debugfs(void)
4567 {
4568         struct dentry *d_tracer;
4569
4570         d_tracer = tracing_init_dentry();
4571         if (!d_tracer)
4572                 return 0;
4573
4574         ftrace_init_dyn_debugfs(d_tracer);
4575
4576         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4577                             NULL, &ftrace_pid_fops);
4578
4579         ftrace_profile_debugfs(d_tracer);
4580
4581         return 0;
4582 }
4583 fs_initcall(ftrace_init_debugfs);
4584
4585 /**
4586  * ftrace_kill - kill ftrace
4587  *
4588  * This function should be used by panic code. It stops ftrace
4589  * but in a not so nice way: it simply disables ftrace without any
4590  * cleanup, and since it takes no locks it is safe in atomic context.
4591  */
4592 void ftrace_kill(void)
4593 {
4594         ftrace_disabled = 1;
4595         ftrace_enabled = 0;
4596         clear_ftrace_function();
4597 }
4598
4599 /**
4600  * ftrace_is_dead - Test if ftrace is dead or not.
4601  */
4602 int ftrace_is_dead(void)
4603 {
4604         return ftrace_disabled;
4605 }
4606
4607 /**
4608  * register_ftrace_function - register a function for profiling
4609  * @ops - ops structure that holds the function for profiling.
4610  *
4611  * Register a function to be called by all functions in the
4612  * kernel.
4613  *
4614  * Note: @ops->func and all the functions it calls must be labeled
4615  *       with "notrace", otherwise it will go into a
4616  *       recursive loop.
4617  */
4618 int register_ftrace_function(struct ftrace_ops *ops)
4619 {
4620         int ret = -1;
4621
4622         ftrace_ops_init(ops);
4623
4624         mutex_lock(&ftrace_lock);
4625
4626         ret = __register_ftrace_function(ops);
4627         if (!ret)
4628                 ret = ftrace_startup(ops, 0);
4629
4630         mutex_unlock(&ftrace_lock);
4631
4632         return ret;
4633 }
4634 EXPORT_SYMBOL_GPL(register_ftrace_function);
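
/*
 * Editor's note: a minimal registration sketch, not part of the original
 * file; the my_trace_func and my_ops names are hypothetical:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// keep this notrace-safe, see the note above
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */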
4635
4636 /**
4637  * unregister_ftrace_function - unregister a function for profiling.
4638  * @ops - ops structure that holds the function to unregister
4639  *
4640  * Unregister a function that was added to be called by ftrace profiling.
4641  */
4642 int unregister_ftrace_function(struct ftrace_ops *ops)
4643 {
4644         int ret;
4645
4646         mutex_lock(&ftrace_lock);
4647         ret = __unregister_ftrace_function(ops);
4648         if (!ret)
4649                 ftrace_shutdown(ops, 0);
4650         mutex_unlock(&ftrace_lock);
4651
4652         return ret;
4653 }
4654 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4655
4656 int
4657 ftrace_enable_sysctl(struct ctl_table *table, int write,
4658                      void __user *buffer, size_t *lenp,
4659                      loff_t *ppos)
4660 {
4661         int ret = -ENODEV;
4662
4663         mutex_lock(&ftrace_lock);
4664
4665         if (unlikely(ftrace_disabled))
4666                 goto out;
4667
4668         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4669
4670         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4671                 goto out;
4672
4673         last_ftrace_enabled = !!ftrace_enabled;
4674
4675         if (ftrace_enabled) {
4676
4677                 ftrace_startup_sysctl();
4678
4679                 /* we are starting ftrace again */
4680                 if (ftrace_ops_list != &ftrace_list_end)
4681                         update_ftrace_function();
4682
4683         } else {
4684                 /* stopping ftrace calls (just send to ftrace_stub) */
4685                 ftrace_trace_function = ftrace_stub;
4686
4687                 ftrace_shutdown_sysctl();
4688         }
4689
4690  out:
4691         mutex_unlock(&ftrace_lock);
4692         return ret;
4693 }
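
/*
 * Editor's note, illustrative and not part of the original file: this
 * handler backs the kernel.ftrace_enabled sysctl, e.g.
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	sysctl kernel.ftrace_enabled=1
 */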
4694
4695 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4696
4697 static int ftrace_graph_active;
4698 static struct notifier_block ftrace_suspend_notifier;
4699
4700 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4701 {
4702         return 0;
4703 }
4704
4705 /* The callbacks that hook a function */
4706 trace_func_graph_ret_t ftrace_graph_return =
4707                         (trace_func_graph_ret_t)ftrace_stub;
4708 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4709
4710 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4711 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4712 {
4713         int i;
4714         int ret = 0;
4715         unsigned long flags;
4716         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4717         struct task_struct *g, *t;
4718
4719         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4720                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4721                                         * sizeof(struct ftrace_ret_stack),
4722                                         GFP_KERNEL);
4723                 if (!ret_stack_list[i]) {
4724                         start = 0;
4725                         end = i;
4726                         ret = -ENOMEM;
4727                         goto free;
4728                 }
4729         }
4730
4731         read_lock_irqsave(&tasklist_lock, flags);
4732         do_each_thread(g, t) {
4733                 if (start == end) {
4734                         ret = -EAGAIN;
4735                         goto unlock;
4736                 }
4737
4738                 if (t->ret_stack == NULL) {
4739                         atomic_set(&t->tracing_graph_pause, 0);
4740                         atomic_set(&t->trace_overrun, 0);
4741                         t->curr_ret_stack = -1;
4742                         /* Make sure the tasks see the -1 first: */
4743                         smp_wmb();
4744                         t->ret_stack = ret_stack_list[start++];
4745                 }
4746         } while_each_thread(g, t);
4747
4748 unlock:
4749         read_unlock_irqrestore(&tasklist_lock, flags);
4750 free:
4751         for (i = start; i < end; i++)
4752                 kfree(ret_stack_list[i]);
4753         return ret;
4754 }
4755
4756 static void
4757 ftrace_graph_probe_sched_switch(void *ignore,
4758                         struct task_struct *prev, struct task_struct *next)
4759 {
4760         unsigned long long timestamp;
4761         int index;
4762
4763         /*
4764          * Does the user want to count the time a function was asleep?
4765          * If so, do not update the time stamps.
4766          */
4767         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4768                 return;
4769
4770         timestamp = trace_clock_local();
4771
4772         prev->ftrace_timestamp = timestamp;
4773
4774         /* only process tasks that we timestamped */
4775         if (!next->ftrace_timestamp)
4776                 return;
4777
4778         /*
4779          * Update all the counters in next to make up for the
4780          * time next was sleeping.
4781          */
4782         timestamp -= next->ftrace_timestamp;
4783
4784         for (index = next->curr_ret_stack; index >= 0; index--)
4785                 next->ret_stack[index].calltime += timestamp;
4786 }
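
/*
 * Editor's note, illustrative and not part of the original file: the
 * sleep-time accounting tested above is controlled by a trace option,
 * e.g. from the tracing directory:
 *
 *	echo 0 > options/sleep-time	# exclude sleep time from durations
 */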
4787
4788 /* Allocate a return stack for each task */
4789 static int start_graph_tracing(void)
4790 {
4791         struct ftrace_ret_stack **ret_stack_list;
4792         int ret, cpu;
4793
4794         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4795                                 sizeof(struct ftrace_ret_stack *),
4796                                 GFP_KERNEL);
4797
4798         if (!ret_stack_list)
4799                 return -ENOMEM;
4800
4801         /* The cpu_boot init_task->ret_stack will never be freed */
4802         for_each_online_cpu(cpu) {
4803                 if (!idle_task(cpu)->ret_stack)
4804                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4805         }
4806
4807         do {
4808                 ret = alloc_retstack_tasklist(ret_stack_list);
4809         } while (ret == -EAGAIN);
4810
4811         if (!ret) {
4812                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4813                 if (ret)
4814                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4815                                 " probe to kernel_sched_switch\n");
4816         }
4817
4818         kfree(ret_stack_list);
4819         return ret;
4820 }
4821
4822 /*
4823  * Hibernation protection.
4824  * The state of the current task is too unstable during
4825  * suspend/restore to disk. We want to protect against that.
4826  */
4827 static int
4828 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4829                                                         void *unused)
4830 {
4831         switch (state) {
4832         case PM_HIBERNATION_PREPARE:
4833                 pause_graph_tracing();
4834                 break;
4835
4836         case PM_POST_HIBERNATION:
4837                 unpause_graph_tracing();
4838                 break;
4839         }
4840         return NOTIFY_DONE;
4841 }
4842
4843 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4844                         trace_func_graph_ent_t entryfunc)
4845 {
4846         int ret = 0;
4847
4848         mutex_lock(&ftrace_lock);
4849
4850         /* we currently allow only one tracer registered at a time */
4851         if (ftrace_graph_active) {
4852                 ret = -EBUSY;
4853                 goto out;
4854         }
4855
4856         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4857         register_pm_notifier(&ftrace_suspend_notifier);
4858
4859         ftrace_graph_active++;
4860         ret = start_graph_tracing();
4861         if (ret) {
4862                 ftrace_graph_active--;
4863                 goto out;
4864         }
4865
4866         ftrace_graph_return = retfunc;
4867         ftrace_graph_entry = entryfunc;
4868
4869         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4870
4871 out:
4872         mutex_unlock(&ftrace_lock);
4873         return ret;
4874 }
4875
4876 void unregister_ftrace_graph(void)
4877 {
4878         mutex_lock(&ftrace_lock);
4879
4880         if (unlikely(!ftrace_graph_active))
4881                 goto out;
4882
4883         ftrace_graph_active--;
4884         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4885         ftrace_graph_entry = ftrace_graph_entry_stub;
4886         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4887         unregister_pm_notifier(&ftrace_suspend_notifier);
4888         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4889
4890  out:
4891         mutex_unlock(&ftrace_lock);
4892 }
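
/*
 * Editor's note: a minimal sketch of the graph hooks above, not part of
 * the original file; the my_entry/my_return names are hypothetical:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */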
4893
4894 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4895
4896 static void
4897 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4898 {
4899         atomic_set(&t->tracing_graph_pause, 0);
4900         atomic_set(&t->trace_overrun, 0);
4901         t->ftrace_timestamp = 0;
4902         /* make curr_ret_stack visible before we add the ret_stack */
4903         smp_wmb();
4904         t->ret_stack = ret_stack;
4905 }
4906
4907 /*
4908  * Allocate a return stack for the idle task. May be the first
4909  * time through, or it may be done by CPU hotplug online.
4910  */
4911 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4912 {
4913         t->curr_ret_stack = -1;
4914         /*
4915          * The idle task has no parent, it either has its own
4916          * stack or no stack at all.
4917          */
4918         if (t->ret_stack)
4919                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4920
4921         if (ftrace_graph_active) {
4922                 struct ftrace_ret_stack *ret_stack;
4923
4924                 ret_stack = per_cpu(idle_ret_stack, cpu);
4925                 if (!ret_stack) {
4926                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4927                                             * sizeof(struct ftrace_ret_stack),
4928                                             GFP_KERNEL);
4929                         if (!ret_stack)
4930                                 return;
4931                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4932                 }
4933                 graph_init_task(t, ret_stack);
4934         }
4935 }
4936
4937 /* Allocate a return stack for newly created task */
4938 void ftrace_graph_init_task(struct task_struct *t)
4939 {
4940         /* Make sure we do not use the parent ret_stack */
4941         t->ret_stack = NULL;
4942         t->curr_ret_stack = -1;
4943
4944         if (ftrace_graph_active) {
4945                 struct ftrace_ret_stack *ret_stack;
4946
4947                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4948                                 * sizeof(struct ftrace_ret_stack),
4949                                 GFP_KERNEL);
4950                 if (!ret_stack)
4951                         return;
4952                 graph_init_task(t, ret_stack);
4953         }
4954 }
4955
4956 void ftrace_graph_exit_task(struct task_struct *t)
4957 {
4958         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4959
4960         t->ret_stack = NULL;
4961         /* NULL must become visible to IRQs before we free it: */
4962         barrier();
4963
4964         kfree(ret_stack);
4965 }
4966
4967 void ftrace_graph_stop(void)
4968 {
4969         ftrace_stop();
4970 }
4971 #endif