/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;

static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

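/*
 * Remember whether the user already had the latency-format flag set,
 * so wakeup_tracer_reset() can restore it when the tracer is torn down.
 */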
static int save_lat_flag;

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
                            struct trace_array_cpu **data,
                            int *pc)
{
        long disabled;
        int cpu;

        if (likely(!wakeup_task))
                return 0;

        *pc = preempt_count();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        *data = tr->data[cpu];
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        return 1;

out:
        atomic_dec(&(*data)->disabled);

out_enable:
        preempt_enable_notrace();
        return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = wakeup_tracer_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

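/*
 * Register either the plain function tracer or the function graph
 * tracer, depending on whether the display-graph option is set.
 */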
static int start_func_tracer(int graph)
{
        int ret;

        if (!graph)
                ret = register_ftrace_function(&trace_ops);
        else
                ret = register_ftrace_graph(&wakeup_graph_return,
                                            &wakeup_graph_entry);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_func_tracer(int graph)
{
        tracer_enabled = 0;

        if (!graph)
                unregister_ftrace_function(&trace_ops);
        else
                unregister_ftrace_graph();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_func_tracer(!set);

        wakeup_reset(wakeup_trace);
        tracing_max_latency = 0;

        return start_func_tracer(set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc, ret = 0;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return 0;

        local_save_flags(flags);
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
        preempt_enable_notrace();

        return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_save_flags(flags);
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        preempt_enable_notrace();
        return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

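/*
 * The woken task may be migrated before it gets to run; keep
 * wakeup_current_cpu up to date so the function tracers above only
 * record on the CPU the task is actually queued on.
 */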
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
        if (task != wakeup_task)
                return;

        wakeup_current_cpu = cpu;
}

static void notrace
probe_wakeup_sched_switch(void *ignore,
                          struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        cycle_t T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see the tracer_enabled = 1
         * and the wakeup_task with an older task, that might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = wakeup_trace->data[wakeup_cpu];

        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        if (!report_latency(delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
        wakeup_cpu = -1;
        wakeup_prio = -1;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        tracing_reset_online_cpus(tr);

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

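        /*
         * A lower ->prio value means a higher priority. Only start
         * tracing a wakeup that outranks both the currently running
         * task and any task we are already tracing (and, for
         * wakeup_rt, only realtime tasks).
         */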
        if ((wakeup_rt && !rt_task(p)) ||
            p->prio >= wakeup_prio ||
            p->prio >= current->prio)
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        arch_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        wakeup_task = p;
        get_task_struct(wakeup_task);

        local_save_flags(flags);

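        /*
         * Stamp the time of the wakeup; probe_wakeup_sched_switch()
         * uses this as T0 when computing the wakeup latency.
         */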
        data = wakeup_trace->data[wakeup_cpu];
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

        /*
         * We must be careful in using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is)
         * it should be safe to use it here.
         */
        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        arch_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return;
        }

        ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
                return;
        }

        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is a slow path anyway.
         */
        smp_wmb();

        if (start_func_tracer(is_graph()))
                printk(KERN_ERR "failed to start wakeup tracer\n");

        return;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        stop_func_tracer(is_graph());
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
        unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static int __wakeup_tracer_init(struct trace_array *tr)
{
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;

        tracing_max_latency = 0;
        wakeup_trace = tr;
        start_wakeup_tracer(tr);
        return 0;
}

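/*
 * The "wakeup" tracer measures the wakeup latency of the highest
 * priority task of any scheduling class; "wakeup_rt" only considers
 * realtime tasks (see the rt_task() check in probe_wakeup()).
 */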
static int wakeup_tracer_init(struct trace_array *tr)
{
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
        wakeup_rt = 1;
        return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);

        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
        wakeup_reset(tr);
        tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
        .name = "wakeup",
        .init = wakeup_tracer_init,
        .reset = wakeup_tracer_reset,
        .start = wakeup_tracer_start,
        .stop = wakeup_tracer_stop,
        .print_max = 1,
        .print_header = wakeup_print_header,
        .print_line = wakeup_print_line,
        .flags = &tracer_flags,
        .set_flag = wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_wakeup,
#endif
        .open = wakeup_trace_open,
        .close = wakeup_trace_close,
        .use_max_tr = 1,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
        .name = "wakeup_rt",
        .init = wakeup_rt_tracer_init,
        .reset = wakeup_tracer_reset,
        .start = wakeup_tracer_start,
        .stop = wakeup_tracer_stop,
        .wait_pipe = poll_wait_pipe,
        .print_max = 1,
        .print_header = wakeup_print_header,
        .print_line = wakeup_print_line,
        .flags = &tracer_flags,
        .set_flag = wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_wakeup,
#endif
        .open = wakeup_trace_open,
        .close = wakeup_trace_close,
        .use_max_tr = 1,
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_rt_tracer);
        if (ret)
                return ret;

        return 0;
}
device_initcall(init_wakeup_tracer);