/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

/*
 * Traverse the ftrace_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/

	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /*see above*/
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function pointers to the stub and in
 * essence stops tracing. There may be a brief lag before all
 * callers stop using the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	rcu_assign_pointer(ftrace_list, ops);

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (!list_empty(&ftrace_pids)) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	func = ftrace_trace_function;
#else
	func = __ftrace_trace_function;
#endif

	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
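
/*
 * Worked example (illustrative only; the exact numbers depend on the
 * arch and config): on a 64-bit kernel with 4K pages,
 * offsetof(struct ftrace_profile_page, records) is 16 bytes and
 * sizeof(struct ftrace_profile) is 48 bytes with the graph tracer
 * fields, so PROFILE_RECORDS_SIZE is 4080 and PROFILES_PER_PAGE
 * works out to 85 records per page.
 */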

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
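
/*
 * Why a power of two: hash_long(ip, ftrace_profile_bits) returns a
 * ftrace_profile_bits-bit bucket index, so 1024 buckets require
 * ftrace_profile_bits == 10, which ftrace_profile_init_cpu() below
 * derives by shifting (size - 1) down to zero.
 */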

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/*
	 * Sample variance (the s^2 column):
	 *   s^2 = (sum(t^2) - n * avg^2) / (n - 1)
	 * which follows from sum((t - avg)^2) = sum(t^2) - n * avg^2.
	 */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
		 * trace_print_graph_duration() will divide by 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func = function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler() calls stop_machine()
			 * so this acts like a synchronize_sched().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The stat files created so far are permanent;
			 * even on this failure we do not free their
			 * memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * These are nested for loops. Do not use 'break' to break out of the
 * loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
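
/*
 * A minimal usage sketch (illustrative only; 'target_ip' and the
 * labels are hypothetical). Note the goto instead of 'break':
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */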

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();
	return 0;
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have failed,
		 * and records that were never converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the code modification is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the code modification is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of imbalance; there is no need to kill
	 * ftrace. It is not critical, but the ftrace_call callers
	 * may never be nopped again after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/*
		 * Do the initial record conversion from the mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		p->flags |= FTRACE_FL_CONVERTED;
		ftrace_update_cnt++;

		/*
		 * If tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediately is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions. Doing the NOP conversion
		 * first puts the module into the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_FAILURES	= (1 << 2),
	FTRACE_ITER_PRINTALL	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	int				hidx;
	int				idx;
	unsigned			flags;
};

static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func_pos = *pos;
	iter->func = rec;

	return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	/* clear both iteration flags */
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock mutex. This is
	 * because those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p) {
		if (iter->flags & FTRACE_ITER_FILTER)
			return t_hash_start(m, pos);

		return NULL;
	}

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps\n", (void *)rec->ip);

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = file->private_data;
		iter = m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
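
/*
 * Illustrative glob forms as mapped by filter_parse_regex() (assumed
 * mapping, matching the cases handled above):
 *
 *	"sched"   -> MATCH_FULL        (exact match)
 *	"sched*"  -> MATCH_FRONT_ONLY  (prefix match)
 *	"*switch" -> MATCH_END_ONLY    (suffix match)
 *	"*sched*" -> MATCH_MIDDLE_ONLY (substring match)
 */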

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static int ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = filter_parse_regex(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'don't filter this module' */
1851 | if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { |
1852 | buff[0] = 0; |
1853 | not = 1; |
1854 | } |
1855 | |
1856 | if (strlen(buff)) { |
1857 | type = filter_parse_regex(buff, strlen(buff), &search, ¬); |
1858 | search_len = strlen(search); |
1859 | } |
1860 | |
1861 | mutex_lock(&ftrace_lock); |
1862 | do_for_each_ftrace_rec(pg, rec) { |
1863 | |
1864 | if (rec->flags & FTRACE_FL_FAILED) |
1865 | continue; |
1866 | |
1867 | if (ftrace_match_module_record(rec, mod, |
1868 | search, search_len, type)) { |
1869 | if (not) |
1870 | rec->flags &= ~flag; |
1871 | else |
1872 | rec->flags |= flag; |
1873 | found = 1; |
1874 | } |
1875 | if (enable && (rec->flags & FTRACE_FL_FILTER)) |
1876 | ftrace_filtered = 1; |
1877 | |
1878 | } while_for_each_ftrace_rec(); |
1879 | mutex_unlock(&ftrace_lock); |
1880 | |
1881 | return found; |
1882 | } |
1883 | |
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	if (ftrace_match_module_records(func, mod, enable))
		return 0;
	return -EINVAL;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);

static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
1946 | preempt_disable_notrace(); |
1947 | hlist_for_each_entry_rcu(entry, n, hhd, node) { |
1948 | if (entry->ip == ip) |
1949 | entry->ops->func(ip, parent_ip, &entry->data); |
1950 | } |
1951 | preempt_enable_notrace(); |
1952 | } |
1953 | |
1954 | static struct ftrace_ops trace_probe_ops __read_mostly = |
1955 | { |
1956 | .func = function_trace_probe_call, |
1957 | }; |
1958 | |
1959 | static int ftrace_probe_registered; |
1960 | |
1961 | static void __enable_ftrace_function_probe(void) |
1962 | { |
1963 | int i; |
1964 | |
1965 | if (ftrace_probe_registered) |
1966 | return; |
1967 | |
1968 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { |
1969 | struct hlist_head *hhd = &ftrace_func_hash[i]; |
1970 | if (hhd->first) |
1971 | break; |
1972 | } |
1973 | /* Nothing registered? */ |
1974 | if (i == FTRACE_FUNC_HASHSIZE) |
1975 | return; |
1976 | |
1977 | __register_ftrace_function(&trace_probe_ops); |
1978 | ftrace_startup(0); |
1979 | ftrace_probe_registered = 1; |
1980 | } |
1981 | |
1982 | static void __disable_ftrace_function_probe(void) |
1983 | { |
1984 | int i; |
1985 | |
1986 | if (!ftrace_probe_registered) |
1987 | return; |
1988 | |
1989 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { |
1990 | struct hlist_head *hhd = &ftrace_func_hash[i]; |
1991 | if (hhd->first) |
1992 | return; |
1993 | } |
1994 | |
1995 | /* no more funcs left */ |
1996 | __unregister_ftrace_function(&trace_probe_ops); |
1997 | ftrace_shutdown(0); |
1998 | ftrace_probe_registered = 0; |
1999 | } |
2000 | |
2001 | |
2002 | static void ftrace_free_entry_rcu(struct rcu_head *rhp) |
2003 | { |
2004 | struct ftrace_func_probe *entry = |
2005 | container_of(rhp, struct ftrace_func_probe, rcu); |
2006 | |
2007 | if (entry->ops->free) |
2008 | entry->ops->free(&entry->data); |
2009 | kfree(entry); |
2010 | } |
2011 | |
2012 | |
2013 | int |
2014 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
2015 | void *data) |
2016 | { |
2017 | struct ftrace_func_probe *entry; |
2018 | struct ftrace_page *pg; |
2019 | struct dyn_ftrace *rec; |
2020 | int type, len, not; |
2021 | unsigned long key; |
2022 | int count = 0; |
2023 | char *search; |
2024 | |
2025 | type = filter_parse_regex(glob, strlen(glob), &search, &not); |
2026 | len = strlen(search); |
2027 | |
2028 | /* we do not support '!' for function probes */ |
2029 | if (WARN_ON(not)) |
2030 | return -EINVAL; |
2031 | |
2032 | mutex_lock(&ftrace_lock); |
2033 | do_for_each_ftrace_rec(pg, rec) { |
2034 | |
2035 | if (rec->flags & FTRACE_FL_FAILED) |
2036 | continue; |
2037 | |
2038 | if (!ftrace_match_record(rec, search, len, type)) |
2039 | continue; |
2040 | |
2041 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
2042 | if (!entry) { |
2043 | /* If we did not process any, then return error */ |
2044 | if (!count) |
2045 | count = -ENOMEM; |
2046 | goto out_unlock; |
2047 | } |
2048 | |
2049 | count++; |
2050 | |
2051 | entry->data = data; |
2052 | |
2053 | /* |
2054 | * The caller might want to do something special |
2055 | * for each function we find. We call the callback |
2056 | * to give the caller an opportunity to do so. |
2057 | */ |
2058 | if (ops->callback) { |
2059 | if (ops->callback(rec->ip, &entry->data) < 0) { |
2060 | /* caller does not like this func */ |
2061 | kfree(entry); |
2062 | continue; |
2063 | } |
2064 | } |
2065 | |
2066 | entry->ops = ops; |
2067 | entry->ip = rec->ip; |
2068 | |
2069 | key = hash_long(entry->ip, FTRACE_HASH_BITS); |
2070 | hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); |
2071 | |
2072 | } while_for_each_ftrace_rec(); |
2073 | __enable_ftrace_function_probe(); |
2074 | |
2075 | out_unlock: |
2076 | mutex_unlock(&ftrace_lock); |
2077 | |
2078 | return count; |
2079 | } |
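/*
 * For illustration only (hypothetical ops and callback names): one way
 * a caller might use register_ftrace_function_probe() above. Every
 * function matching the glob gets a hash entry, and
 * function_trace_probe_call() invokes ->func with the per-entry data
 * whenever one of those functions runs.
 */
#if 0
static void
my_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
{
        pr_info("probe hit at %ps\n", (void *)ip);
}

static struct ftrace_probe_ops my_probe_ops = {
        .func   = my_probe_func,
};

static int my_probe_attach(void)
{
        /* returns the number of functions matched, or a -errno value */
        return register_ftrace_function_probe("schedule*", &my_probe_ops,
                                              NULL);
}

static void my_probe_detach(void)
{
        unregister_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
}
#endif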
2080 | |
2081 | enum { |
2082 | PROBE_TEST_FUNC = 1, |
2083 | PROBE_TEST_DATA = 2 |
2084 | }; |
2085 | |
2086 | static void |
2087 | __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
2088 | void *data, int flags) |
2089 | { |
2090 | struct ftrace_func_probe *entry; |
2091 | struct hlist_node *n, *tmp; |
2092 | char str[KSYM_SYMBOL_LEN]; |
2093 | int type = MATCH_FULL; |
2094 | int i, len = 0; |
2095 | char *search; |
2096 | |
2097 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) |
2098 | glob = NULL; |
2099 | else if (glob) { |
2100 | int not; |
2101 | |
2102 | type = filter_parse_regex(glob, strlen(glob), &search, &not); |
2103 | len = strlen(search); |
2104 | |
2105 | /* we do not support '!' for function probes */ |
2106 | if (WARN_ON(not)) |
2107 | return; |
2108 | } |
2109 | |
2110 | mutex_lock(&ftrace_lock); |
2111 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { |
2112 | struct hlist_head *hhd = &ftrace_func_hash[i]; |
2113 | |
2114 | hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { |
2115 | |
2116 | /* break up if statements for readability */ |
2117 | if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) |
2118 | continue; |
2119 | |
2120 | if ((flags & PROBE_TEST_DATA) && entry->data != data) |
2121 | continue; |
2122 | |
2123 | /* do this last, since it is the most expensive */ |
2124 | if (glob) { |
2125 | kallsyms_lookup(entry->ip, NULL, NULL, |
2126 | NULL, str); |
2127 | if (!ftrace_match(str, glob, len, type)) |
2128 | continue; |
2129 | } |
2130 | |
2131 | hlist_del(&entry->node); |
2132 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); |
2133 | } |
2134 | } |
2135 | __disable_ftrace_function_probe(); |
2136 | mutex_unlock(&ftrace_lock); |
2137 | } |
2138 | |
2139 | void |
2140 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
2141 | void *data) |
2142 | { |
2143 | __unregister_ftrace_function_probe(glob, ops, data, |
2144 | PROBE_TEST_FUNC | PROBE_TEST_DATA); |
2145 | } |
2146 | |
2147 | void |
2148 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) |
2149 | { |
2150 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); |
2151 | } |
2152 | |
2153 | void unregister_ftrace_function_probe_all(char *glob) |
2154 | { |
2155 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); |
2156 | } |
2157 | |
2158 | static LIST_HEAD(ftrace_commands); |
2159 | static DEFINE_MUTEX(ftrace_cmd_mutex); |
2160 | |
2161 | int register_ftrace_command(struct ftrace_func_command *cmd) |
2162 | { |
2163 | struct ftrace_func_command *p; |
2164 | int ret = 0; |
2165 | |
2166 | mutex_lock(&ftrace_cmd_mutex); |
2167 | list_for_each_entry(p, &ftrace_commands, list) { |
2168 | if (strcmp(cmd->name, p->name) == 0) { |
2169 | ret = -EBUSY; |
2170 | goto out_unlock; |
2171 | } |
2172 | } |
2173 | list_add(&cmd->list, &ftrace_commands); |
2174 | out_unlock: |
2175 | mutex_unlock(&ftrace_cmd_mutex); |
2176 | |
2177 | return ret; |
2178 | } |
2179 | |
2180 | int unregister_ftrace_command(struct ftrace_func_command *cmd) |
2181 | { |
2182 | struct ftrace_func_command *p, *n; |
2183 | int ret = -ENODEV; |
2184 | |
2185 | mutex_lock(&ftrace_cmd_mutex); |
2186 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
2187 | if (strcmp(cmd->name, p->name) == 0) { |
2188 | ret = 0; |
2189 | list_del_init(&p->list); |
2190 | goto out_unlock; |
2191 | } |
2192 | } |
2193 | out_unlock: |
2194 | mutex_unlock(&ftrace_cmd_mutex); |
2195 | |
2196 | return ret; |
2197 | } |
2198 | |
2199 | static int ftrace_process_regex(char *buff, int len, int enable) |
2200 | { |
2201 | char *func, *command, *next = buff; |
2202 | struct ftrace_func_command *p; |
2203 | int ret = -EINVAL; |
2204 | |
2205 | func = strsep(&next, ":"); |
2206 | |
2207 | if (!next) { |
2208 | if (ftrace_match_records(func, len, enable)) |
2209 | return 0; |
2210 | return ret; |
2211 | } |
2212 | |
2213 | /* command found */ |
2214 | |
2215 | command = strsep(&next, ":"); |
2216 | |
2217 | mutex_lock(&ftrace_cmd_mutex); |
2218 | list_for_each_entry(p, &ftrace_commands, list) { |
2219 | if (strcmp(p->name, command) == 0) { |
2220 | ret = p->func(func, command, next, enable); |
2221 | goto out_unlock; |
2222 | } |
2223 | } |
2224 | out_unlock: |
2225 | mutex_unlock(&ftrace_cmd_mutex); |
2226 | |
2227 | return ret; |
2228 | } |
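/*
 * For reference: the buffer parsed above comes from writes to the
 * set_ftrace_filter/set_ftrace_notrace files and has the shape
 * "<function>[:<command>[:<parameter>]]". Assuming debugfs is mounted
 * under /sys/kernel/debug, for example:
 *
 *      echo 'vfs_read'         > set_ftrace_filter     # plain match
 *      echo '*:mod:ext4'       > set_ftrace_filter     # "mod" command
 *
 * where "ext4" reaches ftrace_mod_callback() as the @param argument
 * (the function and module names are illustrative).
 */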
2229 | |
2230 | static ssize_t |
2231 | ftrace_regex_write(struct file *file, const char __user *ubuf, |
2232 | size_t cnt, loff_t *ppos, int enable) |
2233 | { |
2234 | struct ftrace_iterator *iter; |
2235 | struct trace_parser *parser; |
2236 | ssize_t ret, read; |
2237 | |
2238 | if (!cnt) |
2239 | return 0; |
2240 | |
2241 | mutex_lock(&ftrace_regex_lock); |
2242 | |
2243 | if (file->f_mode & FMODE_READ) { |
2244 | struct seq_file *m = file->private_data; |
2245 | iter = m->private; |
2246 | } else |
2247 | iter = file->private_data; |
2248 | |
2249 | parser = &iter->parser; |
2250 | read = trace_get_user(parser, ubuf, cnt, ppos); |
2251 | |
2252 | if (read >= 0 && trace_parser_loaded(parser) && |
2253 | !trace_parser_cont(parser)) { |
2254 | ret = ftrace_process_regex(parser->buffer, |
2255 | parser->idx, enable); |
2256 | trace_parser_clear(parser); |
2257 | if (ret) |
2258 | goto out_unlock; |
2259 | } |
2260 | |
2261 | ret = read; |
2262 | out_unlock: |
2263 | mutex_unlock(&ftrace_regex_lock); |
2264 | |
2265 | return ret; |
2266 | } |
2267 | |
2268 | static ssize_t |
2269 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
2270 | size_t cnt, loff_t *ppos) |
2271 | { |
2272 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); |
2273 | } |
2274 | |
2275 | static ssize_t |
2276 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
2277 | size_t cnt, loff_t *ppos) |
2278 | { |
2279 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
2280 | } |
2281 | |
2282 | static void |
2283 | ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) |
2284 | { |
2285 | if (unlikely(ftrace_disabled)) |
2286 | return; |
2287 | |
2288 | mutex_lock(&ftrace_regex_lock); |
2289 | if (reset) |
2290 | ftrace_filter_reset(enable); |
2291 | if (buf) |
2292 | ftrace_match_records(buf, len, enable); |
2293 | mutex_unlock(&ftrace_regex_lock); |
2294 | } |
2295 | |
2296 | /** |
2297 | * ftrace_set_filter - set a function to filter on in ftrace |
2298 | * @buf: the string that holds the function filter text. |
2299 | * @len: the length of the string. |
2300 | * @reset: non-zero to reset all filters before applying this filter. |
2301 | * |
2302 | * Filters denote which functions should be enabled when tracing is enabled. |
2303 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
2304 | */ |
2305 | void ftrace_set_filter(unsigned char *buf, int len, int reset) |
2306 | { |
2307 | ftrace_set_regex(buf, len, reset, 1); |
2308 | } |
2309 | |
2310 | /** |
2311 | * ftrace_set_notrace - set a function to not trace in ftrace |
2312 | * @buf: the string that holds the function notrace text. |
2313 | * @len: the length of the string. |
2314 | * @reset: non-zero to reset all filters before applying this filter. |
2315 | * |
2316 | * Notrace Filters denote which functions should not be enabled when tracing |
2317 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
2318 | * for tracing. |
2319 | */ |
2320 | void ftrace_set_notrace(unsigned char *buf, int len, int reset) |
2321 | { |
2322 | ftrace_set_regex(buf, len, reset, 0); |
2323 | } |
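/*
 * For illustration only: a sketch of in-kernel use of the two helpers
 * above. The function names being filtered are arbitrary examples.
 */
#if 0
static void my_set_filters(void)
{
        /* trace only vfs_read, dropping any previously set filter */
        ftrace_set_filter((unsigned char *)"vfs_read", 8, 1);

        /* additionally keep schedule() out of the trace */
        ftrace_set_notrace((unsigned char *)"schedule", 8, 0);
}
#endif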
2324 | |
2325 | /* |
2326 | * command line interface to allow users to set filters on boot up. |
2327 | */ |
2328 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE |
2329 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
2330 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; |
2331 | |
2332 | static int __init set_ftrace_notrace(char *str) |
2333 | { |
2334 | strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
2335 | return 1; |
2336 | } |
2337 | __setup("ftrace_notrace=", set_ftrace_notrace); |
2338 | |
2339 | static int __init set_ftrace_filter(char *str) |
2340 | { |
2341 | strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
2342 | return 1; |
2343 | } |
2344 | __setup("ftrace_filter=", set_ftrace_filter); |
2345 | |
2346 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2347 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
2348 | static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); |
2349 | |
2350 | static int __init set_graph_function(char *str) |
2351 | { |
2352 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
2353 | return 1; |
2354 | } |
2355 | __setup("ftrace_graph_filter=", set_graph_function); |
2356 | |
2357 | static void __init set_ftrace_early_graph(char *buf) |
2358 | { |
2359 | int ret; |
2360 | char *func; |
2361 | |
2362 | while (buf) { |
2363 | func = strsep(&buf, ","); |
2364 | /* we allow only one expression at a time */ |
2365 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, |
2366 | func); |
2367 | if (ret) |
2368 | printk(KERN_DEBUG "ftrace: function %s not " |
2369 | "traceable\n", func); |
2370 | } |
2371 | } |
2372 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2373 | |
2374 | static void __init set_ftrace_early_filter(char *buf, int enable) |
2375 | { |
2376 | char *func; |
2377 | |
2378 | while (buf) { |
2379 | func = strsep(&buf, ","); |
2380 | ftrace_set_regex(func, strlen(func), 0, enable); |
2381 | } |
2382 | } |
2383 | |
2384 | static void __init set_ftrace_early_filters(void) |
2385 | { |
2386 | if (ftrace_filter_buf[0]) |
2387 | set_ftrace_early_filter(ftrace_filter_buf, 1); |
2388 | if (ftrace_notrace_buf[0]) |
2389 | set_ftrace_early_filter(ftrace_notrace_buf, 0); |
2390 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2391 | if (ftrace_graph_buf[0]) |
2392 | set_ftrace_early_graph(ftrace_graph_buf); |
2393 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2394 | } |
2395 | |
2396 | static int |
2397 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) |
2398 | { |
2399 | struct seq_file *m = (struct seq_file *)file->private_data; |
2400 | struct ftrace_iterator *iter; |
2401 | struct trace_parser *parser; |
2402 | |
2403 | mutex_lock(&ftrace_regex_lock); |
2404 | if (file->f_mode & FMODE_READ) { |
2405 | iter = m->private; |
2406 | |
2407 | seq_release(inode, file); |
2408 | } else |
2409 | iter = file->private_data; |
2410 | |
2411 | parser = &iter->parser; |
2412 | if (trace_parser_loaded(parser)) { |
2413 | parser->buffer[parser->idx] = 0; |
2414 | ftrace_match_records(parser->buffer, parser->idx, enable); |
2415 | } |
2416 | |
2417 | mutex_lock(&ftrace_lock); |
2418 | if (ftrace_start_up && ftrace_enabled) |
2419 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
2420 | mutex_unlock(&ftrace_lock); |
2421 | |
2422 | trace_parser_put(parser); |
2423 | kfree(iter); |
2424 | |
2425 | mutex_unlock(&ftrace_regex_lock); |
2426 | return 0; |
2427 | } |
2428 | |
2429 | static int |
2430 | ftrace_filter_release(struct inode *inode, struct file *file) |
2431 | { |
2432 | return ftrace_regex_release(inode, file, 1); |
2433 | } |
2434 | |
2435 | static int |
2436 | ftrace_notrace_release(struct inode *inode, struct file *file) |
2437 | { |
2438 | return ftrace_regex_release(inode, file, 0); |
2439 | } |
2440 | |
2441 | static const struct file_operations ftrace_avail_fops = { |
2442 | .open = ftrace_avail_open, |
2443 | .read = seq_read, |
2444 | .llseek = seq_lseek, |
2445 | .release = seq_release_private, |
2446 | }; |
2447 | |
2448 | static const struct file_operations ftrace_failures_fops = { |
2449 | .open = ftrace_failures_open, |
2450 | .read = seq_read, |
2451 | .llseek = seq_lseek, |
2452 | .release = seq_release_private, |
2453 | }; |
2454 | |
2455 | static const struct file_operations ftrace_filter_fops = { |
2456 | .open = ftrace_filter_open, |
2457 | .read = seq_read, |
2458 | .write = ftrace_filter_write, |
2459 | .llseek = ftrace_regex_lseek, |
2460 | .release = ftrace_filter_release, |
2461 | }; |
2462 | |
2463 | static const struct file_operations ftrace_notrace_fops = { |
2464 | .open = ftrace_notrace_open, |
2465 | .read = seq_read, |
2466 | .write = ftrace_notrace_write, |
2467 | .llseek = ftrace_regex_lseek, |
2468 | .release = ftrace_notrace_release, |
2469 | }; |
2470 | |
2471 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2472 | |
2473 | static DEFINE_MUTEX(graph_lock); |
2474 | |
2475 | int ftrace_graph_count; |
2476 | int ftrace_graph_filter_enabled; |
2477 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; |
2478 | |
2479 | static void * |
2480 | __g_next(struct seq_file *m, loff_t *pos) |
2481 | { |
2482 | if (*pos >= ftrace_graph_count) |
2483 | return NULL; |
2484 | return &ftrace_graph_funcs[*pos]; |
2485 | } |
2486 | |
2487 | static void * |
2488 | g_next(struct seq_file *m, void *v, loff_t *pos) |
2489 | { |
2490 | (*pos)++; |
2491 | return __g_next(m, pos); |
2492 | } |
2493 | |
2494 | static void *g_start(struct seq_file *m, loff_t *pos) |
2495 | { |
2496 | mutex_lock(&graph_lock); |
2497 | |
2498 | /* Nothing to filter; tell g_show to print "all functions enabled" */ |
2499 | if (!ftrace_graph_filter_enabled && !*pos) |
2500 | return (void *)1; |
2501 | |
2502 | return __g_next(m, pos); |
2503 | } |
2504 | |
2505 | static void g_stop(struct seq_file *m, void *p) |
2506 | { |
2507 | mutex_unlock(&graph_lock); |
2508 | } |
2509 | |
2510 | static int g_show(struct seq_file *m, void *v) |
2511 | { |
2512 | unsigned long *ptr = v; |
2513 | |
2514 | if (!ptr) |
2515 | return 0; |
2516 | |
2517 | if (ptr == (unsigned long *)1) { |
2518 | seq_printf(m, "#### all functions enabled ####\n"); |
2519 | return 0; |
2520 | } |
2521 | |
2522 | seq_printf(m, "%ps\n", (void *)*ptr); |
2523 | |
2524 | return 0; |
2525 | } |
2526 | |
2527 | static const struct seq_operations ftrace_graph_seq_ops = { |
2528 | .start = g_start, |
2529 | .next = g_next, |
2530 | .stop = g_stop, |
2531 | .show = g_show, |
2532 | }; |
2533 | |
2534 | static int |
2535 | ftrace_graph_open(struct inode *inode, struct file *file) |
2536 | { |
2537 | int ret = 0; |
2538 | |
2539 | if (unlikely(ftrace_disabled)) |
2540 | return -ENODEV; |
2541 | |
2542 | mutex_lock(&graph_lock); |
2543 | if ((file->f_mode & FMODE_WRITE) && |
2544 | (file->f_flags & O_TRUNC)) { |
2545 | ftrace_graph_filter_enabled = 0; |
2546 | ftrace_graph_count = 0; |
2547 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); |
2548 | } |
2549 | mutex_unlock(&graph_lock); |
2550 | |
2551 | if (file->f_mode & FMODE_READ) |
2552 | ret = seq_open(file, &ftrace_graph_seq_ops); |
2553 | |
2554 | return ret; |
2555 | } |
2556 | |
2557 | static int |
2558 | ftrace_graph_release(struct inode *inode, struct file *file) |
2559 | { |
2560 | if (file->f_mode & FMODE_READ) |
2561 | seq_release(inode, file); |
2562 | return 0; |
2563 | } |
2564 | |
2565 | static int |
2566 | ftrace_set_func(unsigned long *array, int *idx, char *buffer) |
2567 | { |
2568 | struct dyn_ftrace *rec; |
2569 | struct ftrace_page *pg; |
2570 | int search_len; |
2571 | int fail = 1; |
2572 | int type, not; |
2573 | char *search; |
2574 | bool exists; |
2575 | int i; |
2576 | |
2577 | if (ftrace_disabled) |
2578 | return -ENODEV; |
2579 | |
2580 | /* decode regex */ |
2581 | type = filter_parse_regex(buffer, strlen(buffer), &search, &not); |
2582 | if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) |
2583 | return -EBUSY; |
2584 | |
2585 | search_len = strlen(search); |
2586 | |
2587 | mutex_lock(&ftrace_lock); |
2588 | do_for_each_ftrace_rec(pg, rec) { |
2589 | |
2590 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) |
2591 | continue; |
2592 | |
2593 | if (ftrace_match_record(rec, search, search_len, type)) { |
2594 | /* if it is in the array */ |
2595 | exists = false; |
2596 | for (i = 0; i < *idx; i++) { |
2597 | if (array[i] == rec->ip) { |
2598 | exists = true; |
2599 | break; |
2600 | } |
2601 | } |
2602 | |
2603 | if (!not) { |
2604 | fail = 0; |
2605 | if (!exists) { |
2606 | array[(*idx)++] = rec->ip; |
2607 | if (*idx >= FTRACE_GRAPH_MAX_FUNCS) |
2608 | goto out; |
2609 | } |
2610 | } else { |
2611 | if (exists) { |
2612 | array[i] = array[--(*idx)]; |
2613 | array[*idx] = 0; |
2614 | fail = 0; |
2615 | } |
2616 | } |
2617 | } |
2618 | } while_for_each_ftrace_rec(); |
2619 | out: |
2620 | mutex_unlock(&ftrace_lock); |
2621 | |
2622 | if (fail) |
2623 | return -EINVAL; |
2624 | |
2625 | ftrace_graph_filter_enabled = 1; |
2626 | return 0; |
2627 | } |
2628 | |
2629 | static ssize_t |
2630 | ftrace_graph_write(struct file *file, const char __user *ubuf, |
2631 | size_t cnt, loff_t *ppos) |
2632 | { |
2633 | struct trace_parser parser; |
2634 | ssize_t read, ret; |
2635 | |
2636 | if (!cnt) |
2637 | return 0; |
2638 | |
2639 | mutex_lock(&graph_lock); |
2640 | |
2641 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { |
2642 | ret = -ENOMEM; |
2643 | goto out_unlock; |
2644 | } |
2645 | |
2646 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
2647 | |
2648 | if (read >= 0 && trace_parser_loaded((&parser))) { |
2649 | parser.buffer[parser.idx] = 0; |
2650 | |
2651 | /* we allow only one expression at a time */ |
2652 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, |
2653 | parser.buffer); |
2654 | if (ret) |
2655 | goto out_free; |
2656 | } |
2657 | |
2658 | ret = read; |
2659 | |
2660 | out_free: |
2661 | trace_parser_put(&parser); |
2662 | out_unlock: |
2663 | mutex_unlock(&graph_lock); |
2664 | |
2665 | return ret; |
2666 | } |
2667 | |
2668 | static const struct file_operations ftrace_graph_fops = { |
2669 | .open = ftrace_graph_open, |
2670 | .read = seq_read, |
2671 | .write = ftrace_graph_write, |
2672 | .release = ftrace_graph_release, |
2673 | .llseek = seq_lseek, |
2674 | }; |
2675 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2676 | |
2677 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
2678 | { |
2679 | |
2680 | trace_create_file("available_filter_functions", 0444, |
2681 | d_tracer, NULL, &ftrace_avail_fops); |
2682 | |
2683 | trace_create_file("failures", 0444, |
2684 | d_tracer, NULL, &ftrace_failures_fops); |
2685 | |
2686 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2687 | NULL, &ftrace_filter_fops); |
2688 | |
2689 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, |
2690 | NULL, &ftrace_notrace_fops); |
2691 | |
2692 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2693 | trace_create_file("set_graph_function", 0444, d_tracer, |
2694 | NULL, |
2695 | &ftrace_graph_fops); |
2696 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2697 | |
2698 | return 0; |
2699 | } |
2700 | |
2701 | static int ftrace_process_locs(struct module *mod, |
2702 | unsigned long *start, |
2703 | unsigned long *end) |
2704 | { |
2705 | unsigned long *p; |
2706 | unsigned long addr; |
2707 | unsigned long flags; |
2708 | |
2709 | mutex_lock(&ftrace_lock); |
2710 | p = start; |
2711 | while (p < end) { |
2712 | addr = ftrace_call_adjust(*p++); |
2713 | /* |
2714 | * Some architecture linkers will pad between |
2715 | * the different mcount_loc sections of different |
2716 | * object files to satisfy alignments. |
2717 | * Skip any NULL pointers. |
2718 | */ |
2719 | if (!addr) |
2720 | continue; |
2721 | ftrace_record_ip(addr); |
2722 | } |
2723 | |
2724 | /* disable interrupts to serialize the update without kstop_machine */ |
2725 | local_irq_save(flags); |
2726 | ftrace_update_code(mod); |
2727 | local_irq_restore(flags); |
2728 | mutex_unlock(&ftrace_lock); |
2729 | |
2730 | return 0; |
2731 | } |
2732 | |
2733 | #ifdef CONFIG_MODULES |
2734 | void ftrace_release_mod(struct module *mod) |
2735 | { |
2736 | struct dyn_ftrace *rec; |
2737 | struct ftrace_page *pg; |
2738 | |
2739 | if (ftrace_disabled) |
2740 | return; |
2741 | |
2742 | mutex_lock(&ftrace_lock); |
2743 | do_for_each_ftrace_rec(pg, rec) { |
2744 | if (within_module_core(rec->ip, mod)) { |
2745 | /* |
2746 | * rec->ip is changed in ftrace_free_rec(); it should no |
2747 | * longer fall inside the module's range if it was freed. |
2748 | */ |
2749 | FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE); |
2750 | ftrace_free_rec(rec); |
2751 | } |
2752 | } while_for_each_ftrace_rec(); |
2753 | mutex_unlock(&ftrace_lock); |
2754 | } |
2755 | |
2756 | static void ftrace_init_module(struct module *mod, |
2757 | unsigned long *start, unsigned long *end) |
2758 | { |
2759 | if (ftrace_disabled || start == end) |
2760 | return; |
2761 | ftrace_process_locs(mod, start, end); |
2762 | } |
2763 | |
2764 | static int ftrace_module_notify(struct notifier_block *self, |
2765 | unsigned long val, void *data) |
2766 | { |
2767 | struct module *mod = data; |
2768 | |
2769 | switch (val) { |
2770 | case MODULE_STATE_COMING: |
2771 | ftrace_init_module(mod, mod->ftrace_callsites, |
2772 | mod->ftrace_callsites + |
2773 | mod->num_ftrace_callsites); |
2774 | break; |
2775 | case MODULE_STATE_GOING: |
2776 | ftrace_release_mod(mod); |
2777 | break; |
2778 | } |
2779 | |
2780 | return 0; |
2781 | } |
2782 | #else |
2783 | static int ftrace_module_notify(struct notifier_block *self, |
2784 | unsigned long val, void *data) |
2785 | { |
2786 | return 0; |
2787 | } |
2788 | #endif /* CONFIG_MODULES */ |
2789 | |
2790 | struct notifier_block ftrace_module_nb = { |
2791 | .notifier_call = ftrace_module_notify, |
2792 | .priority = 0, |
2793 | }; |
2794 | |
2795 | extern unsigned long __start_mcount_loc[]; |
2796 | extern unsigned long __stop_mcount_loc[]; |
2797 | |
2798 | void __init ftrace_init(void) |
2799 | { |
2800 | unsigned long count, addr, flags; |
2801 | int ret; |
2802 | |
2803 | /* Keep the ftrace pointer to the stub */ |
2804 | addr = (unsigned long)ftrace_stub; |
2805 | |
2806 | local_irq_save(flags); |
2807 | ftrace_dyn_arch_init(&addr); |
2808 | local_irq_restore(flags); |
2809 | |
2810 | /* ftrace_dyn_arch_init places the return code in addr */ |
2811 | if (addr) |
2812 | goto failed; |
2813 | |
2814 | count = __stop_mcount_loc - __start_mcount_loc; |
2815 | |
2816 | ret = ftrace_dyn_table_alloc(count); |
2817 | if (ret) |
2818 | goto failed; |
2819 | |
2820 | last_ftrace_enabled = ftrace_enabled = 1; |
2821 | |
2822 | ret = ftrace_process_locs(NULL, |
2823 | __start_mcount_loc, |
2824 | __stop_mcount_loc); |
2825 | |
2826 | ret = register_module_notifier(&ftrace_module_nb); |
2827 | if (ret) |
2828 | pr_warning("Failed to register ftrace module notifier\n"); |
2829 | |
2830 | set_ftrace_early_filters(); |
2831 | |
2832 | return; |
2833 | failed: |
2834 | ftrace_disabled = 1; |
2835 | } |
2836 | |
2837 | #else |
2838 | |
2839 | static int __init ftrace_nodyn_init(void) |
2840 | { |
2841 | ftrace_enabled = 1; |
2842 | return 0; |
2843 | } |
2844 | device_initcall(ftrace_nodyn_init); |
2845 | |
2846 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
2847 | static inline void ftrace_startup_enable(int command) { } |
2848 | /* Keep as macros so we do not need to define the commands */ |
2849 | # define ftrace_startup(command) do { } while (0) |
2850 | # define ftrace_shutdown(command) do { } while (0) |
2851 | # define ftrace_startup_sysctl() do { } while (0) |
2852 | # define ftrace_shutdown_sysctl() do { } while (0) |
2853 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
2854 | |
2855 | static void clear_ftrace_swapper(void) |
2856 | { |
2857 | struct task_struct *p; |
2858 | int cpu; |
2859 | |
2860 | get_online_cpus(); |
2861 | for_each_online_cpu(cpu) { |
2862 | p = idle_task(cpu); |
2863 | clear_tsk_trace_trace(p); |
2864 | } |
2865 | put_online_cpus(); |
2866 | } |
2867 | |
2868 | static void set_ftrace_swapper(void) |
2869 | { |
2870 | struct task_struct *p; |
2871 | int cpu; |
2872 | |
2873 | get_online_cpus(); |
2874 | for_each_online_cpu(cpu) { |
2875 | p = idle_task(cpu); |
2876 | set_tsk_trace_trace(p); |
2877 | } |
2878 | put_online_cpus(); |
2879 | } |
2880 | |
2881 | static void clear_ftrace_pid(struct pid *pid) |
2882 | { |
2883 | struct task_struct *p; |
2884 | |
2885 | rcu_read_lock(); |
2886 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
2887 | clear_tsk_trace_trace(p); |
2888 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
2889 | rcu_read_unlock(); |
2890 | |
2891 | put_pid(pid); |
2892 | } |
2893 | |
2894 | static void set_ftrace_pid(struct pid *pid) |
2895 | { |
2896 | struct task_struct *p; |
2897 | |
2898 | rcu_read_lock(); |
2899 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
2900 | set_tsk_trace_trace(p); |
2901 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
2902 | rcu_read_unlock(); |
2903 | } |
2904 | |
2905 | static void clear_ftrace_pid_task(struct pid *pid) |
2906 | { |
2907 | if (pid == ftrace_swapper_pid) |
2908 | clear_ftrace_swapper(); |
2909 | else |
2910 | clear_ftrace_pid(pid); |
2911 | } |
2912 | |
2913 | static void set_ftrace_pid_task(struct pid *pid) |
2914 | { |
2915 | if (pid == ftrace_swapper_pid) |
2916 | set_ftrace_swapper(); |
2917 | else |
2918 | set_ftrace_pid(pid); |
2919 | } |
2920 | |
2921 | static int ftrace_pid_add(int p) |
2922 | { |
2923 | struct pid *pid; |
2924 | struct ftrace_pid *fpid; |
2925 | int ret = -EINVAL; |
2926 | |
2927 | mutex_lock(&ftrace_lock); |
2928 | |
2929 | if (!p) |
2930 | pid = ftrace_swapper_pid; |
2931 | else |
2932 | pid = find_get_pid(p); |
2933 | |
2934 | if (!pid) |
2935 | goto out; |
2936 | |
2937 | ret = 0; |
2938 | |
2939 | list_for_each_entry(fpid, &ftrace_pids, list) |
2940 | if (fpid->pid == pid) |
2941 | goto out_put; |
2942 | |
2943 | ret = -ENOMEM; |
2944 | |
2945 | fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); |
2946 | if (!fpid) |
2947 | goto out_put; |
2948 | |
2949 | list_add(&fpid->list, &ftrace_pids); |
2950 | fpid->pid = pid; |
2951 | |
2952 | set_ftrace_pid_task(pid); |
2953 | |
2954 | ftrace_update_pid_func(); |
2955 | ftrace_startup_enable(0); |
2956 | |
2957 | mutex_unlock(&ftrace_lock); |
2958 | return 0; |
2959 | |
2960 | out_put: |
2961 | if (pid != ftrace_swapper_pid) |
2962 | put_pid(pid); |
2963 | |
2964 | out: |
2965 | mutex_unlock(&ftrace_lock); |
2966 | return ret; |
2967 | } |
2968 | |
2969 | static void ftrace_pid_reset(void) |
2970 | { |
2971 | struct ftrace_pid *fpid, *safe; |
2972 | |
2973 | mutex_lock(&ftrace_lock); |
2974 | list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { |
2975 | struct pid *pid = fpid->pid; |
2976 | |
2977 | clear_ftrace_pid_task(pid); |
2978 | |
2979 | list_del(&fpid->list); |
2980 | kfree(fpid); |
2981 | } |
2982 | |
2983 | ftrace_update_pid_func(); |
2984 | ftrace_startup_enable(0); |
2985 | |
2986 | mutex_unlock(&ftrace_lock); |
2987 | } |
2988 | |
2989 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
2990 | { |
2991 | mutex_lock(&ftrace_lock); |
2992 | |
2993 | if (list_empty(&ftrace_pids) && (!*pos)) |
2994 | return (void *) 1; |
2995 | |
2996 | return seq_list_start(&ftrace_pids, *pos); |
2997 | } |
2998 | |
2999 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) |
3000 | { |
3001 | if (v == (void *)1) |
3002 | return NULL; |
3003 | |
3004 | return seq_list_next(v, &ftrace_pids, pos); |
3005 | } |
3006 | |
3007 | static void fpid_stop(struct seq_file *m, void *p) |
3008 | { |
3009 | mutex_unlock(&ftrace_lock); |
3010 | } |
3011 | |
3012 | static int fpid_show(struct seq_file *m, void *v) |
3013 | { |
3014 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); |
3015 | |
3016 | if (v == (void *)1) { |
3017 | seq_printf(m, "no pid\n"); |
3018 | return 0; |
3019 | } |
3020 | |
3021 | if (fpid->pid == ftrace_swapper_pid) |
3022 | seq_printf(m, "swapper tasks\n"); |
3023 | else |
3024 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); |
3025 | |
3026 | return 0; |
3027 | } |
3028 | |
3029 | static const struct seq_operations ftrace_pid_sops = { |
3030 | .start = fpid_start, |
3031 | .next = fpid_next, |
3032 | .stop = fpid_stop, |
3033 | .show = fpid_show, |
3034 | }; |
3035 | |
3036 | static int |
3037 | ftrace_pid_open(struct inode *inode, struct file *file) |
3038 | { |
3039 | int ret = 0; |
3040 | |
3041 | if ((file->f_mode & FMODE_WRITE) && |
3042 | (file->f_flags & O_TRUNC)) |
3043 | ftrace_pid_reset(); |
3044 | |
3045 | if (file->f_mode & FMODE_READ) |
3046 | ret = seq_open(file, &ftrace_pid_sops); |
3047 | |
3048 | return ret; |
3049 | } |
3050 | |
3051 | static ssize_t |
3052 | ftrace_pid_write(struct file *filp, const char __user *ubuf, |
3053 | size_t cnt, loff_t *ppos) |
3054 | { |
3055 | char buf[64], *tmp; |
3056 | long val; |
3057 | int ret; |
3058 | |
3059 | if (cnt >= sizeof(buf)) |
3060 | return -EINVAL; |
3061 | |
3062 | if (copy_from_user(&buf, ubuf, cnt)) |
3063 | return -EFAULT; |
3064 | |
3065 | buf[cnt] = 0; |
3066 | |
3067 | /* |
3068 | * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" |
3069 | * to clean the filter quietly. |
3070 | */ |
3071 | tmp = strstrip(buf); |
3072 | if (strlen(tmp) == 0) |
3073 | return 1; |
3074 | |
3075 | ret = strict_strtol(tmp, 10, &val); |
3076 | if (ret < 0) |
3077 | return ret; |
3078 | |
3079 | ret = ftrace_pid_add(val); |
3080 | |
3081 | return ret ? ret : cnt; |
3082 | } |
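/*
 * For reference: example interaction with the set_ftrace_pid file
 * implemented above (the pid value is illustrative):
 *
 *      echo 1234 > set_ftrace_pid      # trace only pid 1234
 *      echo 0    > set_ftrace_pid      # trace the per-cpu swapper tasks
 *      echo      > set_ftrace_pid      # clear the pid filter quietly
 */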
3083 | |
3084 | static int |
3085 | ftrace_pid_release(struct inode *inode, struct file *file) |
3086 | { |
3087 | if (file->f_mode & FMODE_READ) |
3088 | seq_release(inode, file); |
3089 | |
3090 | return 0; |
3091 | } |
3092 | |
3093 | static const struct file_operations ftrace_pid_fops = { |
3094 | .open = ftrace_pid_open, |
3095 | .write = ftrace_pid_write, |
3096 | .read = seq_read, |
3097 | .llseek = seq_lseek, |
3098 | .release = ftrace_pid_release, |
3099 | }; |
3100 | |
3101 | static __init int ftrace_init_debugfs(void) |
3102 | { |
3103 | struct dentry *d_tracer; |
3104 | |
3105 | d_tracer = tracing_init_dentry(); |
3106 | if (!d_tracer) |
3107 | return 0; |
3108 | |
3109 | ftrace_init_dyn_debugfs(d_tracer); |
3110 | |
3111 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
3112 | NULL, &ftrace_pid_fops); |
3113 | |
3114 | ftrace_profile_debugfs(d_tracer); |
3115 | |
3116 | return 0; |
3117 | } |
3118 | fs_initcall(ftrace_init_debugfs); |
3119 | |
3120 | /** |
3121 | * ftrace_kill - kill ftrace |
3122 | * |
3123 | * This function should be used by panic code. It stops ftrace |
3124 | * but in a not so nice way: no locks are taken and nothing is |
3125 | * cleaned up, which makes it safe to call from atomic/panic context. |
3126 | */ |
3127 | void ftrace_kill(void) |
3128 | { |
3129 | ftrace_disabled = 1; |
3130 | ftrace_enabled = 0; |
3131 | clear_ftrace_function(); |
3132 | } |
3133 | |
3134 | /** |
3135 | * register_ftrace_function - register a function for profiling |
3136 | * @ops: ops structure that holds the function for profiling. |
3137 | * |
3138 | * Register a function to be called by all functions in the |
3139 | * kernel. |
3140 | * |
3141 | * Note: @ops->func and all the functions it calls must be labeled |
3142 | * with "notrace", otherwise it will go into a |
3143 | * recursive loop. |
3144 | */ |
3145 | int register_ftrace_function(struct ftrace_ops *ops) |
3146 | { |
3147 | int ret; |
3148 | |
3149 | if (unlikely(ftrace_disabled)) |
3150 | return -1; |
3151 | |
3152 | mutex_lock(&ftrace_lock); |
3153 | |
3154 | ret = __register_ftrace_function(ops); |
3155 | ftrace_startup(0); |
3156 | |
3157 | mutex_unlock(&ftrace_lock); |
3158 | return ret; |
3159 | } |
3160 | |
3161 | /** |
3162 | * unregister_ftrace_function - unregister a function for profiling. |
3163 | * @ops: ops structure that holds the function to unregister |
3164 | * |
3165 | * Unregister a function that was added to be called by ftrace profiling. |
3166 | */ |
3167 | int unregister_ftrace_function(struct ftrace_ops *ops) |
3168 | { |
3169 | int ret; |
3170 | |
3171 | mutex_lock(&ftrace_lock); |
3172 | ret = __unregister_ftrace_function(ops); |
3173 | ftrace_shutdown(0); |
3174 | mutex_unlock(&ftrace_lock); |
3175 | |
3176 | return ret; |
3177 | } |
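/*
 * For illustration only (hypothetical callback and ops names): the
 * register/unregister pair above in use. Note the notrace marking that
 * the kernel-doc warns about; it keeps the hook from tracing itself.
 */
#if 0
static void notrace
my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* runs on every traced call: ip is the callee, parent_ip the caller */
}

static struct ftrace_ops my_ops = {
        .func   = my_trace_func,
};

static int __init my_tracer_init(void)
{
        return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
        unregister_ftrace_function(&my_ops);
}
#endif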
3178 | |
3179 | int |
3180 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
3181 | void __user *buffer, size_t *lenp, |
3182 | loff_t *ppos) |
3183 | { |
3184 | int ret; |
3185 | |
3186 | if (unlikely(ftrace_disabled)) |
3187 | return -ENODEV; |
3188 | |
3189 | mutex_lock(&ftrace_lock); |
3190 | |
3191 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
3192 | |
3193 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
3194 | goto out; |
3195 | |
3196 | last_ftrace_enabled = !!ftrace_enabled; |
3197 | |
3198 | if (ftrace_enabled) { |
3199 | |
3200 | ftrace_startup_sysctl(); |
3201 | |
3202 | /* we are starting ftrace again */ |
3203 | if (ftrace_list != &ftrace_list_end) { |
3204 | if (ftrace_list->next == &ftrace_list_end) |
3205 | ftrace_trace_function = ftrace_list->func; |
3206 | else |
3207 | ftrace_trace_function = ftrace_list_func; |
3208 | } |
3209 | |
3210 | } else { |
3211 | /* stopping ftrace calls (just send to ftrace_stub) */ |
3212 | ftrace_trace_function = ftrace_stub; |
3213 | |
3214 | ftrace_shutdown_sysctl(); |
3215 | } |
3216 | |
3217 | out: |
3218 | mutex_unlock(&ftrace_lock); |
3219 | return ret; |
3220 | } |
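/*
 * For reference: this handler backs the ftrace_enabled sysctl, so the
 * switch above can be flipped from userspace, e.g.:
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled   # route calls to ftrace_stub
 *      echo 1 > /proc/sys/kernel/ftrace_enabled   # restore registered tracers
 */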
3221 | |
3222 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3223 | |
3224 | static int ftrace_graph_active; |
3225 | static struct notifier_block ftrace_suspend_notifier; |
3226 | |
3227 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
3228 | { |
3229 | return 0; |
3230 | } |
3231 | |
3232 | /* The callbacks that hook a function */ |
3233 | trace_func_graph_ret_t ftrace_graph_return = |
3234 | (trace_func_graph_ret_t)ftrace_stub; |
3235 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; |
3236 | |
3237 | /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
3238 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
3239 | { |
3240 | int i; |
3241 | int ret = 0; |
3242 | unsigned long flags; |
3243 | int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; |
3244 | struct task_struct *g, *t; |
3245 | |
3246 | for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { |
3247 | ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH |
3248 | * sizeof(struct ftrace_ret_stack), |
3249 | GFP_KERNEL); |
3250 | if (!ret_stack_list[i]) { |
3251 | start = 0; |
3252 | end = i; |
3253 | ret = -ENOMEM; |
3254 | goto free; |
3255 | } |
3256 | } |
3257 | |
3258 | read_lock_irqsave(&tasklist_lock, flags); |
3259 | do_each_thread(g, t) { |
3260 | if (start == end) { |
3261 | ret = -EAGAIN; |
3262 | goto unlock; |
3263 | } |
3264 | |
3265 | if (t->ret_stack == NULL) { |
3266 | atomic_set(&t->tracing_graph_pause, 0); |
3267 | atomic_set(&t->trace_overrun, 0); |
3268 | t->curr_ret_stack = -1; |
3269 | /* Make sure the tasks see the -1 first: */ |
3270 | smp_wmb(); |
3271 | t->ret_stack = ret_stack_list[start++]; |
3272 | } |
3273 | } while_each_thread(g, t); |
3274 | |
3275 | unlock: |
3276 | read_unlock_irqrestore(&tasklist_lock, flags); |
3277 | free: |
3278 | for (i = start; i < end; i++) |
3279 | kfree(ret_stack_list[i]); |
3280 | return ret; |
3281 | } |
3282 | |
3283 | static void |
3284 | ftrace_graph_probe_sched_switch(void *ignore, |
3285 | struct task_struct *prev, struct task_struct *next) |
3286 | { |
3287 | unsigned long long timestamp; |
3288 | int index; |
3289 | |
3290 | /* |
3291 | * Does the user want to count the time a function was asleep? |
3292 | * If so, do not update the time stamps. |
3293 | */ |
3294 | if (trace_flags & TRACE_ITER_SLEEP_TIME) |
3295 | return; |
3296 | |
3297 | timestamp = trace_clock_local(); |
3298 | |
3299 | prev->ftrace_timestamp = timestamp; |
3300 | |
3301 | /* only process tasks that we timestamped */ |
3302 | if (!next->ftrace_timestamp) |
3303 | return; |
3304 | |
3305 | /* |
3306 | * Update all the counters in next to make up for the |
3307 | * time next was sleeping. |
3308 | */ |
3309 | timestamp -= next->ftrace_timestamp; |
3310 | |
3311 | for (index = next->curr_ret_stack; index >= 0; index--) |
3312 | next->ret_stack[index].calltime += timestamp; |
3313 | } |
3314 | |
3315 | /* Allocate a return stack for each task */ |
3316 | static int start_graph_tracing(void) |
3317 | { |
3318 | struct ftrace_ret_stack **ret_stack_list; |
3319 | int ret, cpu; |
3320 | |
3321 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * |
3322 | sizeof(struct ftrace_ret_stack *), |
3323 | GFP_KERNEL); |
3324 | |
3325 | if (!ret_stack_list) |
3326 | return -ENOMEM; |
3327 | |
3328 | /* The idle tasks' ret_stack will never be freed */ |
3329 | for_each_online_cpu(cpu) { |
3330 | if (!idle_task(cpu)->ret_stack) |
3331 | ftrace_graph_init_task(idle_task(cpu)); |
3332 | } |
3333 | |
3334 | do { |
3335 | ret = alloc_retstack_tasklist(ret_stack_list); |
3336 | } while (ret == -EAGAIN); |
3337 | |
3338 | if (!ret) { |
3339 | ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
3340 | if (ret) |
3341 | pr_info("ftrace_graph: Couldn't activate tracepoint" |
3342 | " probe to kernel_sched_switch\n"); |
3343 | } |
3344 | |
3345 | kfree(ret_stack_list); |
3346 | return ret; |
3347 | } |
3348 | |
3349 | /* |
3350 | * Hibernation protection. |
3351 | * The state of the current task is too unstable during |
3352 | * suspend/restore to disk. We want to protect against that. |
3353 | */ |
3354 | static int |
3355 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, |
3356 | void *unused) |
3357 | { |
3358 | switch (state) { |
3359 | case PM_HIBERNATION_PREPARE: |
3360 | pause_graph_tracing(); |
3361 | break; |
3362 | |
3363 | case PM_POST_HIBERNATION: |
3364 | unpause_graph_tracing(); |
3365 | break; |
3366 | } |
3367 | return NOTIFY_DONE; |
3368 | } |
3369 | |
3370 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
3371 | trace_func_graph_ent_t entryfunc) |
3372 | { |
3373 | int ret = 0; |
3374 | |
3375 | mutex_lock(&ftrace_lock); |
3376 | |
3377 | /* we currently allow only one tracer registered at a time */ |
3378 | if (ftrace_graph_active) { |
3379 | ret = -EBUSY; |
3380 | goto out; |
3381 | } |
3382 | |
3383 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; |
3384 | register_pm_notifier(&ftrace_suspend_notifier); |
3385 | |
3386 | ftrace_graph_active++; |
3387 | ret = start_graph_tracing(); |
3388 | if (ret) { |
3389 | ftrace_graph_active--; |
3390 | goto out; |
3391 | } |
3392 | |
3393 | ftrace_graph_return = retfunc; |
3394 | ftrace_graph_entry = entryfunc; |
3395 | |
3396 | ftrace_startup(FTRACE_START_FUNC_RET); |
3397 | |
3398 | out: |
3399 | mutex_unlock(&ftrace_lock); |
3400 | return ret; |
3401 | } |
3402 | |
3403 | void unregister_ftrace_graph(void) |
3404 | { |
3405 | mutex_lock(&ftrace_lock); |
3406 | |
3407 | if (unlikely(!ftrace_graph_active)) |
3408 | goto out; |
3409 | |
3410 | ftrace_graph_active--; |
3411 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
3412 | ftrace_graph_entry = ftrace_graph_entry_stub; |
3413 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
3414 | unregister_pm_notifier(&ftrace_suspend_notifier); |
3415 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
3416 | |
3417 | out: |
3418 | mutex_unlock(&ftrace_lock); |
3419 | } |
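/*
 * For illustration only (hypothetical callback names): hooking the
 * graph tracer via the pair above. A nonzero return from the entry
 * handler tells ftrace to record that function's entry and exit.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;       /* trace this function */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->rettime - trace->calltime spans the call */
}

static int __init my_graph_init(void)
{
        /* note the order: return handler first, entry handler second */
        return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif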
3420 | |
3421 | /* Allocate a return stack for newly created task */ |
3422 | void ftrace_graph_init_task(struct task_struct *t) |
3423 | { |
3424 | /* Make sure we do not use the parent ret_stack */ |
3425 | t->ret_stack = NULL; |
3426 | t->curr_ret_stack = -1; |
3427 | |
3428 | if (ftrace_graph_active) { |
3429 | struct ftrace_ret_stack *ret_stack; |
3430 | |
3431 | ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
3432 | * sizeof(struct ftrace_ret_stack), |
3433 | GFP_KERNEL); |
3434 | if (!ret_stack) |
3435 | return; |
3436 | atomic_set(&t->tracing_graph_pause, 0); |
3437 | atomic_set(&t->trace_overrun, 0); |
3438 | t->ftrace_timestamp = 0; |
3439 | /* make curr_ret_stack visible before we add the ret_stack */ |
3440 | smp_wmb(); |
3441 | t->ret_stack = ret_stack; |
3442 | } |
3443 | } |
3444 | |
3445 | void ftrace_graph_exit_task(struct task_struct *t) |
3446 | { |
3447 | struct ftrace_ret_stack *ret_stack = t->ret_stack; |
3448 | |
3449 | t->ret_stack = NULL; |
3450 | /* NULL must become visible to IRQs before we free it: */ |
3451 | barrier(); |
3452 | |
3453 | kfree(ret_stack); |
3454 | } |
3455 | |
3456 | void ftrace_graph_stop(void) |
3457 | { |
3458 | ftrace_stop(); |
3459 | } |
3460 | #endif |
3461 |