/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */
16 | |
17 | #include <linux/context_tracking.h> |
18 | #include <linux/rcupdate.h> |
19 | #include <linux/sched.h> |
20 | #include <linux/hardirq.h> |
21 | #include <linux/export.h> |
22 | #include <linux/kprobes.h> |
23 | |
24 | #define CREATE_TRACE_POINTS |
25 | #include <trace/events/context_tracking.h> |
26 | |
struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

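/**
 * context_tracking_cpu_set - enable context tracking probes on a CPU
 * @cpu: the CPU to mark as context tracking active
 *
 * Mark @cpu as active for context tracking. The first CPU marked this way
 * also enables the context_tracking_enabled static key, which turns on
 * the user enter/exit probes globally.
 */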
void context_tracking_cpu_set(int cpu)
{
        if (!per_cpu(context_tracking.active, cpu)) {
                per_cpu(context_tracking.active, cpu) = true;
                static_key_slow_inc(&context_tracking_enabled);
        }
}
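
/*
 * Sketch of a typical caller (illustration only, not code from this file):
 * the NO_HZ_FULL boot code is expected to activate tracking for each CPU
 * in the nohz_full set, roughly:
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */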

/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 *                               enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section, because this
 * function puts RCU into an extended quiescent state.
 */
void context_tracking_user_enter(void)
{
        unsigned long flags;

        /*
         * Repeat the user_enter() enablement check here because some archs
         * may be calling this from asm, and if no CPU needs context
         * tracking they shouldn't go further. Keep this check until those
         * archs support the inlined static key check.
         */
        if (!context_tracking_is_enabled())
                return;

        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to this nesting:
         * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
         * This would mess up the dyntick_nesting count though. And the
         * rcu_irq_*() helpers are enough to protect RCU uses inside the
         * exception. So just return immediately if we detect we are in an IRQ.
         */
        if (in_interrupt())
                return;

        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);

        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) != IN_USER) {
                if (__this_cpu_read(context_tracking.active)) {
                        trace_user_enter(0);
                        /*
                         * At this stage, only low level arch entry code remains and
                         * then we'll run in userspace. We can assume there won't be
                         * any RCU read-side critical section until the next call to
                         * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
                         * on the tick.
                         */
                        vtime_user_enter(current);
                        rcu_user_enter();
                }
                /*
                 * Even if context tracking is disabled on this CPU, because it's outside
                 * the full dynticks mask for example, we still have to keep track of the
                 * context transitions and states to prevent inconsistency on those of
                 * other CPUs.
                 * If a task triggers an exception in userspace, then sleeps in the
                 * exception handler and finally migrates to another CPU, that new CPU
                 * must know where the exception returns by the time we call
                 * exception_exit().
                 * This information can only be provided by the previous CPU when it called
                 * exception_enter().
                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
                 * is false because we know that CPU is not tickless.
                 */
                __this_cpu_write(context_tracking.state, IN_USER);
        }
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
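
/*
 * Sketch of where this runs (illustration only, not actual arch code):
 * an architecture's return-to-userspace path calls user_enter(), which
 * resolves to context_tracking_user_enter() when tracking is enabled:
 *
 *	user_enter();
 *	... only low level register restore and the return
 *	    instruction to userspace follow from here ...
 */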

#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_context(void)
{
        enum ctx_state prev_ctx;

        if (likely(!preemptible()))
                return;

        /*
         * Need to disable preemption in case user_exit() is traced
         * and the tracer calls preempt_enable_notrace() causing
         * an infinite recursion.
         */
        preempt_disable_notrace();
        prev_ctx = exception_enter();
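        /*
         * Re-enable preemption without checking for rescheduling: the
         * direct preempt_schedule() call below performs the preemption
         * itself, so a resched check here would be redundant.
         */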
        preempt_enable_no_resched_notrace();

        preempt_schedule();

        preempt_disable_notrace();
        exception_exit(prev_ctx);
        preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */

/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we enter the kernel from userspace and
 * before any use of an RCU read side critical section. This potentially
 * includes any high level kernel code like syscalls, exceptions, signal
 * handling, etc.
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
        unsigned long flags;

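        /*
         * The two early returns below mirror the checks at the top of
         * context_tracking_user_enter(); see the comments there for why
         * disabled tracking and interrupt context bail out early.
         */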
        if (!context_tracking_is_enabled())
                return;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) == IN_USER) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * We are going to run code that may use RCU. Inform
                         * RCU core about that (ie: we may need the tick again).
                         */
                        rcu_user_exit();
                        vtime_user_exit(current);
                        trace_user_exit(0);
                }
                __this_cpu_write(context_tracking.state, IN_KERNEL);
        }
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

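/*
 * For reference, the enter/exit pair above is what exception_enter() and
 * exception_exit() in <linux/context_tracking.h> build on. A typical
 * exception handler uses them like this (sketch, not code from this file):
 *
 *	enum ctx_state prev_state = exception_enter();
 *	... handle the exception; RCU read side sections are safe here ...
 *	exception_exit(prev_state);
 */
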
/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user/kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
                                    struct task_struct *next)
{
        clear_tsk_thread_flag(prev, TIF_NOHZ);
        set_tsk_thread_flag(next, TIF_NOHZ);
}

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                context_tracking_cpu_set(cpu);
}
#endif