kernel/trace/trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
    func_trace = tr;
    tr->cpu = get_cpu();
    put_cpu();

    tracing_start_cmdline_record();
    tracing_start_function_trace();
    return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
    tracing_stop_function_trace();
    tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
    tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
    struct trace_array *tr = func_trace;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int cpu;
    int pc;

    if (unlikely(!ftrace_function_enabled))
        return;

    pc = preempt_count();
    preempt_disable_notrace();
    local_save_flags(flags);
    cpu = raw_smp_processor_id();
    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);

    if (likely(disabled == 1))
        trace_function(tr, ip, parent_ip, flags, pc);

    atomic_dec(&data->disabled);
    preempt_enable_notrace();
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
    struct trace_array *tr = func_trace;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int cpu;
    int pc;

    if (unlikely(!ftrace_function_enabled))
        return;

    /*
     * Need to use raw, since this must be called before the
     * recursive protection is performed.
     */
    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);

    if (likely(disabled == 1)) {
        pc = preempt_count();
        trace_function(tr, ip, parent_ip, flags, pc);
    }

    atomic_dec(&data->disabled);
    local_irq_restore(flags);
}

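/*
 * Note: the per-cpu data->disabled counter above doubles as a
 * recursion guard. If recording a trace entry itself triggers another
 * traced function call on this CPU, the nested handler sees
 * disabled > 1 and records nothing, so reentrancy cannot corrupt the
 * ring buffer.
 */
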
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
    struct trace_array *tr = func_trace;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int cpu;
    int pc;

    if (unlikely(!ftrace_function_enabled))
        return;

    /*
     * Need to use raw, since this must be called before the
     * recursive protection is performed.
     */
    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);

    if (likely(disabled == 1)) {
        pc = preempt_count();
        trace_function(tr, ip, parent_ip, flags, pc);
        /*
         * skip over 5 funcs:
         *   __ftrace_trace_stack,
         *   __trace_stack,
         *   function_stack_trace_call,
         *   ftrace_list_func,
         *   ftrace_call
         */
        __trace_stack(tr, flags, 5, pc);
    }

    atomic_dec(&data->disabled);
    local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
    .func = function_trace_call,
    .flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
    .func = function_stack_trace_call,
    .flags = FTRACE_OPS_FL_GLOBAL,
};

/* Our two options */
enum {
    TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
    { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
    { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
    .val = 0, /* By default: all flags disabled */
    .opts = func_opts
};

static void tracing_start_function_trace(void)
{
    ftrace_function_enabled = 0;

    if (trace_flags & TRACE_ITER_PREEMPTONLY)
        trace_ops.func = function_trace_call_preempt_only;
    else
        trace_ops.func = function_trace_call;

    if (func_flags.val & TRACE_FUNC_OPT_STACK)
        register_ftrace_function(&trace_stack_ops);
    else
        register_ftrace_function(&trace_ops);

    ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
    ftrace_function_enabled = 0;

    if (func_flags.val & TRACE_FUNC_OPT_STACK)
        unregister_ftrace_function(&trace_stack_ops);
    else
        unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
    if (bit == TRACE_FUNC_OPT_STACK) {
        /* do nothing if already set */
        if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
            return 0;

        if (set) {
            unregister_ftrace_function(&trace_ops);
            register_ftrace_function(&trace_stack_ops);
        } else {
            unregister_ftrace_function(&trace_stack_ops);
            register_ftrace_function(&trace_ops);
        }

        return 0;
    }

    return -EINVAL;
}

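/*
 * Runtime toggle for the stack-trace option handled by func_set_flag()
 * above (a usage sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and the function tracer is current):
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *   echo 0 > /sys/kernel/debug/tracing/options/func_stack_trace
 */
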
static struct tracer function_trace __read_mostly =
{
    .name = "function",
    .init = function_trace_init,
    .reset = function_trace_reset,
    .start = function_trace_start,
    .wait_pipe = poll_wait_pipe,
    .flags = &func_flags,
    .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
    .selftest = trace_selftest_startup_function,
#endif
};

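/*
 * Enabling the tracer registered above (a usage sketch; assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */
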
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
    long *count = (long *)data;

    if (tracing_is_on())
        return;

    if (!*count)
        return;

    if (*count != -1)
        (*count)--;

    tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
    long *count = (long *)data;

    if (!tracing_is_on())
        return;

    if (!*count)
        return;

    if (*count != -1)
        (*count)--;

    tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
             struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
    .func = ftrace_traceon,
    .print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
    .func = ftrace_traceoff,
    .print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
             struct ftrace_probe_ops *ops, void *data)
{
    long count = (long)data;

    seq_printf(m, "%ps:", (void *)ip);

    if (ops == &traceon_probe_ops)
        seq_printf(m, "traceon");
    else
        seq_printf(m, "traceoff");

    if (count == -1)
        seq_printf(m, ":unlimited\n");
    else
        seq_printf(m, ":count=%ld\n", count);

    return 0;
}

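/*
 * Registered probes are listed when reading set_ftrace_filter, via the
 * print callback above; a line looks like (illustrative function name
 * and count):
 *
 *   schedule:traceoff:count=4
 */
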
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
    struct ftrace_probe_ops *ops;

    /* we register both traceon and traceoff to this callback */
    if (strcmp(cmd, "traceon") == 0)
        ops = &traceon_probe_ops;
    else
        ops = &traceoff_probe_ops;

    unregister_ftrace_function_probe_func(glob, ops);

    return 0;
}

static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
    struct ftrace_probe_ops *ops;
    void *count = (void *)-1;
    char *number;
    int ret;

    /* hash funcs only work with set_ftrace_filter */
    if (!enable)
        return -EINVAL;

    if (glob[0] == '!')
        return ftrace_trace_onoff_unreg(glob+1, cmd, param);

    /* we register both traceon and traceoff to this callback */
    if (strcmp(cmd, "traceon") == 0)
        ops = &traceon_probe_ops;
    else
        ops = &traceoff_probe_ops;

    if (!param)
        goto out_reg;

    number = strsep(&param, ":");

    if (!strlen(number))
        goto out_reg;

    /*
     * We use the callback data field (which is a pointer)
     * as our counter.
     */
    ret = strict_strtoul(number, 0, (unsigned long *)&count);
    if (ret)
        return ret;

 out_reg:
    ret = register_ftrace_function_probe(glob, ops, count);

    return ret < 0 ? ret : 0;
}

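/*
 * Example of the command syntax parsed above (illustrative function
 * name; paths relative to /sys/kernel/debug/tracing):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter   # disarm after 5 hits
 *   echo '!schedule:traceoff' > set_ftrace_filter    # remove the probe
 */
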
static struct ftrace_func_command ftrace_traceon_cmd = {
    .name = "traceon",
    .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
    .name = "traceoff",
    .func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
    int ret;

    ret = register_ftrace_command(&ftrace_traceoff_cmd);
    if (ret)
        return ret;

    ret = register_ftrace_command(&ftrace_traceon_cmd);
    if (ret)
        unregister_ftrace_command(&ftrace_traceoff_cmd);
    return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
    return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
    init_func_cmd_traceon();
    return register_tracer(&function_trace);
}
device_initcall(init_function_trace);
