Root/
1 | #ifndef _LINUX_FTRACE_EVENT_H |
2 | #define _LINUX_FTRACE_EVENT_H |
3 | |
4 | #include <linux/ring_buffer.h> |
5 | #include <linux/trace_seq.h> |
6 | #include <linux/percpu.h> |
7 | #include <linux/hardirq.h> |
8 | #include <linux/perf_event.h> |
9 | |
10 | struct trace_array; |
11 | struct tracer; |
12 | struct dentry; |
13 | |
/* Per-cpu scratch trace_seq -- presumably backing the print helpers
 * below; defined in the tracing core.  TODO confirm users. */
DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);

/* Maps one flag bit (or enum value) to its human-readable name. */
struct trace_print_flags {
	unsigned long		mask;	/* bit(s) / value being named */
	const char		*name;	/* printable name for @mask */
};
20 | |
/* Render the bits of @flags as names from @flag_array, joined by
 * @delim, using @p as the output buffer.  Returns the formatted
 * string. */
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

/* Render the single value @val as its matching name from
 * @symbol_array, again using @p as the output buffer. */
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);
27 | |
28 | /* |
29 | * The trace entry - the most basic unit of tracing. This is what |
30 | * is printed in the end as a single line in the trace output, such as: |
31 | * |
32 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter |
33 | */ |
struct trace_entry {
	unsigned short		type;		/* event type id (bounded by FTRACE_MAX_EVENT) */
	unsigned char		flags;		/* state flags captured at record time */
	unsigned char		preempt_count;	/* preempt count when the event fired */
	int			pid;		/* recording task's pid */
	int			lock_depth;	/* lock nesting depth -- presumably BKL depth, TODO confirm */
};
41 | |
/* Largest representable event type id: all-ones in trace_entry.type
 * (an unsigned short, so this evaluates to 65535). */
#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
44 | |
45 | /* |
46 | * Trace iterator - used by printout routines who present trace |
47 | * results to users and which routines might sleep, etc: |
48 | */ |
struct trace_iterator {
	struct trace_array	*tr;		/* trace array being iterated */
	struct tracer		*trace;		/* active tracer */
	void			*private;	/* tracer-private state */
	int			cpu_file;	/* cpu selector for per-cpu trace files */
	struct mutex		mutex;		/* serializes users of this iterator */
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];	/* one ring-buffer iter per cpu */
	unsigned long		iter_flags;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;		/* formatted-output accumulator */
	struct trace_entry	*ent;		/* current entry */
	int			leftover;	/* partial-line carry-over state -- TODO confirm */
	int			cpu;		/* cpu the current entry came from */
	u64			ts;		/* timestamp of the current entry */

	loff_t			pos;		/* file position for seq_file interface */
	long			idx;

	cpumask_var_t		started;	/* cpus already visited -- presumably; verify in core */
};
70 | |
71 | |
/* Callback that formats the iterator's current entry into iter->seq. */
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

/*
 * Output formatter for one event type: one callback per output mode.
 * @node presumably hashes the event by @type for lookup; @list chains
 * all registered events.
 */
struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;		/* event type id this formatter handles */
	trace_print_func	trace;		/* default human-readable output */
	trace_print_func	raw;		/* raw-format output */
	trace_print_func	hex;		/* hex-dump output */
	trace_print_func	binary;		/* binary output */
};
83 | |
/* Add/remove an output formatter for its event->type. */
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);
86 | |
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,	/* Entry fully formatted */
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
94 | |
/* Fill in the common trace_entry fields for the current task from the
 * given irq @flags and preempt count @pc. */
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

/* Reserve @len bytes for an event of @type in the current trace ring
 * buffer; on success *current_buffer is set to the buffer used. */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);

/* Commit a previously reserved event.  The _nowake variant presumably
 * skips waking up blocked readers -- verify in kernel/trace. */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc);

/* Discard a reserved event instead of committing it. */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

/* Record @tsk's command name so pid -> comm lookup works in output. */
void tracing_record_cmdline(struct task_struct *tsk);
112 | |
struct event_filter;

/*
 * One registered trace event.  Ties together the event's directory
 * entry, output formatter, registration callbacks, field descriptions
 * and any active filter.
 */
struct ftrace_event_call {
	struct list_head	list;		/* on the global event list */
	char			*name;		/* event name */
	char			*system;	/* subsystem this event belongs to */
	struct dentry		*dir;		/* directory entry (presumably debugfs) */
	struct trace_event	*event;		/* output formatter callbacks */
	int			enabled;	/* non-zero while the event is being traced */
	int			(*regfunc)(struct ftrace_event_call *);		/* hook up the tracepoint */
	void			(*unregfunc)(struct ftrace_event_call *);	/* tear it down */
	int			id;		/* event type id */
	const char		*print_fmt;	/* printf-style format for the event */
	int			(*raw_init)(struct ftrace_event_call *);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	fields;		/* fields declared via trace_define_field() */
	int			filter_active;	/* non-zero when @filter should be applied */
	struct event_filter	*filter;
	void			*mod;		/* owning module, if any -- TODO confirm */
	void			*data;

	/* perf integration */
	int			perf_refcount;	/* perf users of this event */
	int			(*perf_event_enable)(struct ftrace_event_call *);
	void			(*perf_event_disable)(struct ftrace_event_call *);
};
138 | |
/* Upper bound on a single perf trace record's size, in bytes. */
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_PRED		32	/* max predicates per event filter */
#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
143 | |
/* Free @call's filter predicates. */
extern void destroy_preds(struct ftrace_event_call *call);
/* Evaluate @filter against the raw record @rec. */
extern int filter_match_preds(struct event_filter *filter, void *rec);
/* Apply @call's filter to @rec and, on mismatch, discard the reserved
 * @event from @buffer.  Returns non-zero when the event was discarded
 * -- presumably; verify against the tracing core. */
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
150 | |
/* Filter field types: how a filter predicate interprets a field
 * (names suggest scalar vs. the three string layouts). */
enum {
	FILTER_OTHER = 0,	/* plain scalar field */
	FILTER_STATIC_STRING,	/* fixed-size string embedded in the record */
	FILTER_DYN_STRING,	/* dynamically-sized string in the record */
	FILTER_PTR_STRING,	/* string referenced through a pointer */
};
157 | |
extern int trace_event_raw_init(struct ftrace_event_call *call);
/* Describe one field of @call's record (type/name/layout/signedness)
 * for the filtering and format machinery. */
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

/* True when casting -1 to @type yields a negative value. */
#define is_signed_type(type)	(((type)(-1)) < 0)

/* Set (@set != 0) or clear tracing of @event in @system. */
int trace_set_clr_event(const char *system, const char *event, int set);
168 | |
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 *
 * event_trace_printk() records the current task's comm, then emits a
 * binary (bprintk) record for constant format strings -- stashing the
 * format in the __trace_printk_fmt section -- or a plain trace_printk
 * record otherwise.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
187 | |
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

/* Per-cpu register snapshot used when feeding trace events to perf. */
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

/* Hook/unhook the event identified by @event_id into perf. */
extern int perf_trace_enable(int event_id);
extern void perf_trace_disable(int event_id);
/* Attach/free a filter string on a perf-owned trace event. */
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
/* Get a raw buffer of @size bytes for a perf trace record of @type.
 * Stores the recursion context in *rctxp and saved irq state in
 * *irq_flags -- both released by perf_trace_buf_submit(). */
extern void *
perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
		       unsigned long *irq_flags);
201 | |
202 | static inline void |
203 | perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, |
204 | u64 count, unsigned long irq_flags, struct pt_regs *regs) |
205 | { |
206 | struct trace_entry *entry = raw_data; |
207 | |
208 | perf_tp_event(entry->type, addr, count, raw_data, size, regs); |
209 | perf_swevent_put_recursion_context(rctx); |
210 | local_irq_restore(irq_flags); |
211 | } |
212 | #endif |
213 | |
214 | #endif /* _LINUX_FTRACE_EVENT_H */ |
215 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9