kernel/trace/trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int sched_stopped;


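/*
 * Write a TRACE_CTX entry (prev/next pid, prio, state and the CPU the
 * next task runs on) into the given trace array's ring buffer.
 */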
void
tracing_sched_switch_trace(struct trace_array *tr,
               struct task_struct *prev,
               struct task_struct *next,
               unsigned long flags, int pc)
{
    struct ftrace_event_call *call = &event_context_switch;
    struct ring_buffer *buffer = tr->buffer;
    struct ring_buffer_event *event;
    struct ctx_switch_entry *entry;

    event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                      sizeof(*entry), flags, pc);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    entry->prev_pid = prev->pid;
    entry->prev_prio = prev->prio;
    entry->prev_state = prev->state;
    entry->next_pid = next->pid;
    entry->next_prio = next->prio;
    entry->next_state = next->state;
    entry->next_cpu = task_cpu(next);

    if (!filter_check_discard(call, entry, buffer, event))
        trace_buffer_unlock_commit(buffer, event, flags, pc);
}

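/*
 * Tracepoint probe attached to sched_switch: records the command lines
 * of both tasks and, when the tracer is enabled, logs the context switch.
 */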
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
    struct trace_array_cpu *data;
    unsigned long flags;
    int cpu;
    int pc;

    if (unlikely(!sched_ref))
        return;

    tracing_record_cmdline(prev);
    tracing_record_cmdline(next);

    if (!tracer_enabled || sched_stopped)
        return;

    pc = preempt_count();
    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = ctx_trace->data[cpu];

    if (likely(!atomic_read(&data->disabled)))
        tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

    local_irq_restore(flags);
}

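/*
 * Write a TRACE_WAKE entry describing the current task waking up the
 * wakee, then record the kernel and user stack traces for the event.
 */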
void
tracing_sched_wakeup_trace(struct trace_array *tr,
               struct task_struct *wakee,
               struct task_struct *curr,
               unsigned long flags, int pc)
{
    struct ftrace_event_call *call = &event_wakeup;
    struct ring_buffer_event *event;
    struct ctx_switch_entry *entry;
    struct ring_buffer *buffer = tr->buffer;

    event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                      sizeof(*entry), flags, pc);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    entry->prev_pid = curr->pid;
    entry->prev_prio = curr->prio;
    entry->prev_state = curr->state;
    entry->next_pid = wakee->pid;
    entry->next_prio = wakee->prio;
    entry->next_state = wakee->state;
    entry->next_cpu = task_cpu(wakee);

    if (!filter_check_discard(call, entry, buffer, event))
        ring_buffer_unlock_commit(buffer, event);
    ftrace_trace_stack(tr->buffer, flags, 6, pc);
    ftrace_trace_userstack(tr->buffer, flags, pc);
}

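/*
 * Tracepoint probe attached to sched_wakeup and sched_wakeup_new:
 * records current's command line and, when enabled, logs the wakeup.
 */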
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
    struct trace_array_cpu *data;
    unsigned long flags;
    int cpu, pc;

    if (unlikely(!sched_ref))
        return;

    tracing_record_cmdline(current);

    if (!tracer_enabled || sched_stopped)
        return;

    pc = preempt_count();
    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = ctx_trace->data[cpu];

    if (likely(!atomic_read(&data->disabled)))
        tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                       flags, pc);

    local_irq_restore(flags);
}

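/*
 * Attach the probes above to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unwinding on partial failure.
 */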
static int tracing_sched_register(void)
{
    int ret;

    ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
    if (ret) {
        pr_info("wakeup trace: Couldn't activate tracepoint"
            " probe to kernel_sched_wakeup\n");
        return ret;
    }

    ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
    if (ret) {
        pr_info("wakeup trace: Couldn't activate tracepoint"
            " probe to kernel_sched_wakeup_new\n");
        goto fail_deprobe;
    }

    ret = register_trace_sched_switch(probe_sched_switch, NULL);
    if (ret) {
        pr_info("sched trace: Couldn't activate tracepoint"
            " probe to kernel_sched_switch\n");
        goto fail_deprobe_wake_new;
    }

    return ret;
fail_deprobe_wake_new:
    unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
    unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
    return ret;
}

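/* Detach all three sched tracepoint probes. */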
static void tracing_sched_unregister(void)
{
    unregister_trace_sched_switch(probe_sched_switch, NULL);
    unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
    unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}

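/*
 * Take a reference on the sched tracepoints; the probes are registered
 * only when the reference count goes from zero to one.
 */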
static void tracing_start_sched_switch(void)
{
    mutex_lock(&sched_register_mutex);
    if (!(sched_ref++))
        tracing_sched_register();
    mutex_unlock(&sched_register_mutex);
}

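/*
 * Drop a reference; the probes are unregistered when the count
 * returns to zero.
 */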
static void tracing_stop_sched_switch(void)
{
    mutex_lock(&sched_register_mutex);
    if (!(--sched_ref))
        tracing_sched_unregister();
    mutex_unlock(&sched_register_mutex);
}

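/*
 * Keep the sched probes registered so that pid-to-comm mappings are
 * recorded, without enabling context switch tracing itself.
 */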
void tracing_start_cmdline_record(void)
{
    tracing_start_sched_switch();
}

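/* Release the reference taken by tracing_start_cmdline_record(). */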
void tracing_stop_cmdline_record(void)
{
    tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
    if (unlikely(!ctx_trace)) {
        WARN_ON(1);
        return;
    }

    tracing_start_sched_switch();

    mutex_lock(&sched_register_mutex);
    tracer_enabled++;
    mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
    mutex_lock(&sched_register_mutex);
    tracer_enabled--;
    WARN_ON(tracer_enabled < 0);
    mutex_unlock(&sched_register_mutex);

    tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
    ctx_trace = tr;
}

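/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * tracer might wire up context switch recording with the hooks above.
 * The callbacks and the name "example_tracer" are hypothetical; only the
 * three tracing_* calls below are provided by this file.
 */
static int example_tracer_init(struct trace_array *tr)
{
    /* record context switch/wakeup events into this tracer's buffer */
    tracing_sched_switch_assign_trace(tr);
    /* register the sched probes and bump tracer_enabled */
    tracing_start_sched_switch_record();
    return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
    /* drop tracer_enabled and the probe reference */
    tracing_stop_sched_switch_record();
}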
