Root/
1 | /* |
2 | * trace context switch |
3 | * |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> |
5 | * |
6 | */ |
7 | #include <linux/module.h> |
8 | #include <linux/fs.h> |
9 | #include <linux/debugfs.h> |
10 | #include <linux/kallsyms.h> |
11 | #include <linux/uaccess.h> |
12 | #include <linux/ftrace.h> |
13 | #include <trace/events/sched.h> |
14 | |
15 | #include "trace.h" |
16 | |
/* trace array the context-switch events are written into */
static struct trace_array *ctx_trace;
/* >0 when a tracer asked for switch/wakeup events to be recorded */
static int __read_mostly tracer_enabled;
/* refcount of users of the sched tracepoint probes */
static int sched_ref;
/* protects sched_ref and tracer_enabled transitions */
static DEFINE_MUTEX(sched_register_mutex);
/* set by the tracer's ->stop callback to pause event recording */
static int sched_stopped;
22 | |
23 | |
24 | void |
25 | tracing_sched_switch_trace(struct trace_array *tr, |
26 | struct task_struct *prev, |
27 | struct task_struct *next, |
28 | unsigned long flags, int pc) |
29 | { |
30 | struct ftrace_event_call *call = &event_context_switch; |
31 | struct ring_buffer *buffer = tr->buffer; |
32 | struct ring_buffer_event *event; |
33 | struct ctx_switch_entry *entry; |
34 | |
35 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, |
36 | sizeof(*entry), flags, pc); |
37 | if (!event) |
38 | return; |
39 | entry = ring_buffer_event_data(event); |
40 | entry->prev_pid = prev->pid; |
41 | entry->prev_prio = prev->prio; |
42 | entry->prev_state = prev->state; |
43 | entry->next_pid = next->pid; |
44 | entry->next_prio = next->prio; |
45 | entry->next_state = next->state; |
46 | entry->next_cpu = task_cpu(next); |
47 | |
48 | if (!filter_check_discard(call, entry, buffer, event)) |
49 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
50 | } |
51 | |
52 | static void |
53 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) |
54 | { |
55 | struct trace_array_cpu *data; |
56 | unsigned long flags; |
57 | int cpu; |
58 | int pc; |
59 | |
60 | if (unlikely(!sched_ref)) |
61 | return; |
62 | |
63 | tracing_record_cmdline(prev); |
64 | tracing_record_cmdline(next); |
65 | |
66 | if (!tracer_enabled || sched_stopped) |
67 | return; |
68 | |
69 | pc = preempt_count(); |
70 | local_irq_save(flags); |
71 | cpu = raw_smp_processor_id(); |
72 | data = ctx_trace->data[cpu]; |
73 | |
74 | if (likely(!atomic_read(&data->disabled))) |
75 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); |
76 | |
77 | local_irq_restore(flags); |
78 | } |
79 | |
80 | void |
81 | tracing_sched_wakeup_trace(struct trace_array *tr, |
82 | struct task_struct *wakee, |
83 | struct task_struct *curr, |
84 | unsigned long flags, int pc) |
85 | { |
86 | struct ftrace_event_call *call = &event_wakeup; |
87 | struct ring_buffer_event *event; |
88 | struct ctx_switch_entry *entry; |
89 | struct ring_buffer *buffer = tr->buffer; |
90 | |
91 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, |
92 | sizeof(*entry), flags, pc); |
93 | if (!event) |
94 | return; |
95 | entry = ring_buffer_event_data(event); |
96 | entry->prev_pid = curr->pid; |
97 | entry->prev_prio = curr->prio; |
98 | entry->prev_state = curr->state; |
99 | entry->next_pid = wakee->pid; |
100 | entry->next_prio = wakee->prio; |
101 | entry->next_state = wakee->state; |
102 | entry->next_cpu = task_cpu(wakee); |
103 | |
104 | if (!filter_check_discard(call, entry, buffer, event)) |
105 | ring_buffer_unlock_commit(buffer, event); |
106 | ftrace_trace_stack(tr->buffer, flags, 6, pc); |
107 | ftrace_trace_userstack(tr->buffer, flags, pc); |
108 | } |
109 | |
110 | static void |
111 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) |
112 | { |
113 | struct trace_array_cpu *data; |
114 | unsigned long flags; |
115 | int cpu, pc; |
116 | |
117 | if (unlikely(!sched_ref)) |
118 | return; |
119 | |
120 | tracing_record_cmdline(current); |
121 | |
122 | if (!tracer_enabled || sched_stopped) |
123 | return; |
124 | |
125 | pc = preempt_count(); |
126 | local_irq_save(flags); |
127 | cpu = raw_smp_processor_id(); |
128 | data = ctx_trace->data[cpu]; |
129 | |
130 | if (likely(!atomic_read(&data->disabled))) |
131 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, |
132 | flags, pc); |
133 | |
134 | local_irq_restore(flags); |
135 | } |
136 | |
/*
 * Attach the scheduler tracepoint probes (wakeup, wakeup_new, switch).
 * On failure, probes registered so far are unwound in reverse order
 * via the goto labels below.  Returns 0 on success or the error code
 * from the failing registration.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}
169 | |
/*
 * Detach all scheduler tracepoint probes, in the reverse order of
 * their registration in tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
176 | |
177 | static void tracing_start_sched_switch(void) |
178 | { |
179 | mutex_lock(&sched_register_mutex); |
180 | if (!(sched_ref++)) |
181 | tracing_sched_register(); |
182 | mutex_unlock(&sched_register_mutex); |
183 | } |
184 | |
185 | static void tracing_stop_sched_switch(void) |
186 | { |
187 | mutex_lock(&sched_register_mutex); |
188 | if (!(--sched_ref)) |
189 | tracing_sched_unregister(); |
190 | mutex_unlock(&sched_register_mutex); |
191 | } |
192 | |
/*
 * Start recording pid -> comm mappings; the sched_switch probes do the
 * cmdline recording even when tracer_enabled is off.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
197 | |
/* Stop recording pid -> comm mappings (pairs with the start above). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
202 | |
203 | /** |
204 | * tracing_start_sched_switch_record - start tracing context switches |
205 | * |
206 | * Turns on context switch tracing for a tracer. |
207 | */ |
208 | void tracing_start_sched_switch_record(void) |
209 | { |
210 | if (unlikely(!ctx_trace)) { |
211 | WARN_ON(1); |
212 | return; |
213 | } |
214 | |
215 | tracing_start_sched_switch(); |
216 | |
217 | mutex_lock(&sched_register_mutex); |
218 | tracer_enabled++; |
219 | mutex_unlock(&sched_register_mutex); |
220 | } |
221 | |
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	/* going negative means unbalanced start/stop calls */
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
236 | |
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
249 | |
/* Tracer-reset helper: @tr is unused, the record state is global. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
254 | |
/*
 * ->init callback of the sched_switch tracer: adopt @tr as the target
 * trace array, clear its per-cpu buffers, then start recording.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	/* reset before starting so old events don't mix with new ones */
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
262 | |
/* ->reset callback: stop recording, but only if we ever started. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
268 | |
/* ->start callback: un-pause event recording (probes stay attached). */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
273 | |
/* ->stop callback: pause event recording without detaching probes. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
278 | |
/* Tracer plugin definition registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
291 | |
/* Register the sched_switch tracer at device initcall time. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
297 | |
298 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9