/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
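
/*
 * Example (an illustrative sketch, not part of this file): readers
 * bracket their accesses with the rcu_read_lock()/rcu_read_unlock()
 * wrappers, which map to the two functions above under
 * CONFIG_PREEMPT_RCU.  The names "gp", "struct foo", and
 * do_something_with() are hypothetical:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);	 - fetch the RCU-protected pointer
 *	if (p)
 *		do_something_with(p->a); - read-side use, no sleeping
 *	rcu_read_unlock();
 */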

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
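
/*
 * Example (a minimal sketch; "bh_protected_ptr" is a hypothetical
 * RCU-bh-protected pointer): a function that must run inside an RCU-bh
 * read-side critical section can assert this with the helper above,
 * or implicitly via rcu_dereference_bh(), whose lockdep checking is
 * based on rcu_read_lock_bh_held():
 *
 *	WARN_ON_ONCE(!rcu_read_lock_bh_held());
 *	p = rcu_dereference_bh(bh_protected_ptr);
 */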

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me up once the grace period has elapsed. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
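
/*
 * Example of the pattern wait_rcu_gp() supports (a sketch of the
 * intended usage, not necessarily the exact call sites in this tree):
 * each synchronous grace-period wait passes its flavor's
 * callback-queuing function, so a synchronize_rcu()-style wait
 * reduces to one line per flavor:
 *
 *	wait_rcu_gp(call_rcu);		- normal RCU grace period
 *	wait_rcu_gp(call_rcu_bh);	- RCU-bh grace period
 *	wait_rcu_gp(call_rcu_sched);	- RCU-sched grace period
 */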

#ifdef CONFIG_PROVE_RCU
/*
 * Wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
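
/*
 * Example of the on-stack lifecycle these two functions bracket
 * (a sketch; "my_cb" is a hypothetical callback).  wait_rcu_gp()
 * above follows exactly this sequence:
 *
 *	struct rcu_head head;
 *
 *	init_rcu_head_on_stack(&head);
 *	call_rcu(&head, my_cb);
 *	... wait until my_cb has been invoked ...
 *	destroy_rcu_head_on_stack(&head);
 */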

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_init = rcuhead_fixup_init,
	.fixup_activate = rcuhead_fixup_activate,
	.fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
{
	trace_rcu_torture_read(rcutorturename, rhp);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif