/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

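/*
 * probe_likely_condition() records one annotated-branch event in the
 * ring buffer.  The per-cpu "disabled" counter below acts as a
 * recursion guard: the ring-buffer and trace code called from here
 * contain likely()/unlikely() annotations of their own, so a nested
 * invocation sees the counter above 1 at the atomic_inc_return()
 * check and bails out instead of recursing.
 */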
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	pc = preempt_count();
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}

static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}
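
/*
 * Note the pairing with the lockless readers: trace_likely_condition()
 * tests branch_tracing_enabled without the mutex and
 * probe_likely_condition() then loads branch_tracer.  The smp_wmb()
 * above orders the pointer store before the counter increment, so a
 * probe that sees the counter non-zero also sees a valid trace_array.
 * disable_branch_tracing() below only decrements the counter and
 * leaves branch_tracer in place, keeping probes already past the
 * check safe.
 */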
void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	/* trace_seq_printf() returns 0 when the output buffer is full */
	if (!trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			      field->correct ? "  ok  " : " MISS ",
			      field->func,
			      field->file,
			      field->line))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		    "  FUNC:FILE:LINE\n");
	seq_puts(s, "#              | |       |          |         |"
		    "    |\n");
}
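
/*
 * With the header above, an event line in the "trace" file comes out
 * roughly as below (task, CPU, timestamp and branch site are
 * hypothetical values, not output of this file):
 *
 *	<idle>-0     [000]   123.456789: [ MISS ] __switch_to:process.c:624
 */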

static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};
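
/*
 * Registering this event (see init_branch_tracer() below) binds ring
 * buffer entries of type TRACE_BRANCH to trace_branch_print(), so
 * they are rendered by the function above when the trace file is read.
 */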

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};

__init static int init_branch_tracer(void)
{
	int ret;

	/* register_ftrace_event() returns the event type on success, 0 on failure */
	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);
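
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo branch > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 */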

#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
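
/*
 * How calls arrive here: with branch profiling enabled,
 * include/linux/compiler.h redefines likely()/unlikely() to funnel
 * every annotated condition through ftrace_likely_update().  A
 * simplified sketch of that expansion (see compiler.h for the real
 * macro):
 *
 *	#define likely(x) ({						\
 *		static struct ftrace_branch_data			\
 *			__attribute__((section("_ftrace_annotated_branch"))) \
 *			______f = {					\
 *				.func = __func__,			\
 *				.file = __FILE__,			\
 *				.line = __LINE__,			\
 *			};						\
 *		int ______r = !!(x);					\
 *		ftrace_likely_update(&______f, ______r, 1);		\
 *		______r;						\
 *	})
 *
 * unlikely() is the same expansion with expect == 0.
 */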

extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];
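
/*
 * These are not real arrays: the linker script places every
 * ftrace_branch_data instance emitted by the macro sketched above into
 * one contiguous section and defines these symbols at its boundaries,
 * so the stat callbacks below can walk the section as a flat array.
 */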

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect  %% ");
	seq_printf(m, "       Function                "
		      "  File              Line\n"
		      " ------- ---------  - "
		      "       --------                "
		      "  ----              ----\n");
	return 0;
}

static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}
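
/*
 * Worked examples: correct = 3, incorrect = 1 yields 1 * 100 / 4 = 25.
 * With correct == 0, any misses count as 100; a branch never executed
 * (0/0) yields -1, which branch_stat_show() prints as "X".
 */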

static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}
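
/*
 * The stat core calls stat_start() once for the first record and then
 * stat_next() repeatedly, so the pair above walks the linker section
 * one struct ftrace_branch_data at a time until the stop symbol.
 */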

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;

	if (a->incorrect < b->incorrect)
		return -1;
	if (a->incorrect > b->incorrect)
		return 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct > b->correct)
		return -1;
	if (a->correct < b->correct)
		return 1;

	return 0;
}

static struct tracer_stat annotated_branch_stats = {
	.name		= "branch_annotated",
	.stat_start	= annotated_branch_stat_start,
	.stat_next	= annotated_branch_stat_next,
	.stat_cmp	= annotated_branch_stat_cmp,
	.stat_headers	= annotated_branch_stat_headers,
	.stat_show	= branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);
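
/*
 * The table registered above is read from
 * <debugfs>/tracing/trace_stat/branch_annotated.  A sample row
 * (values hypothetical):
 *
 *	 correct incorrect  %        Function    File    Line
 *	       3         1 25        foo_update  foo.c   123
 */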

#ifdef CONFIG_PROFILE_ALL_BRANCHES

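/*
 * With CONFIG_PROFILE_ALL_BRANCHES, compiler.h additionally wraps the
 * "if" keyword itself so that every conditional is counted, annotated
 * or not.  A simplified sketch of that wrapper:
 *
 *	#define if(cond) if (({					\
 *		static struct ftrace_branch_data		\
 *			__attribute__((section("_ftrace_branch"))) \
 *			______f = {				\
 *				.func = __func__,		\
 *				.file = __FILE__,		\
 *				.line = __LINE__,		\
 *			};					\
 *		int ______r = !!(cond);				\
 *		______f.miss_hit[______r]++;			\
 *		______r;					\
 *	}))
 *
 * miss_hit[] shares storage with correct/incorrect in struct
 * ftrace_branch_data, which is why branch_stat_show() serves both
 * tables ("the miss is overlayed on correct, and hit on incorrect").
 */
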
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, "   miss      hit    %% ");
	seq_printf(m, "       Function                "
		      "  File              Line\n"
		      " ------- ---------  - "
		      "       --------                "
		      "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name		= "branch_all",
	.stat_start	= all_branch_stat_start,
	.stat_next	= all_branch_stat_next,
	.stat_headers	= all_branch_stat_headers,
	.stat_show	= branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */