/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "rcutiny_plugin.h"

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
	barrier();
	rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Called with
 * irqs disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	reset_cpu_stall_ticks(rcp);
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	check_cpu_stalls();
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}
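
/*
 * Illustration (not part of this file): __rcu_process_callbacks() above
 * uses a common pattern -- detach the ready callbacks with irqs disabled,
 * then invoke them with irqs enabled so that enqueues are never blocked
 * for long.  A minimal user-space sketch of the same idea; all names
 * (node, pending, run_callbacks, irq_disable/irq_enable) are hypothetical:
 *
 *	struct node { struct node *next; void (*func)(struct node *); };
 *	static struct node *pending;
 *
 *	static void run_callbacks(void)
 *	{
 *		struct node *list, *next;
 *
 *		irq_disable();			// hypothetical critical section
 *		list = pending;			// detach the entire list...
 *		pending = NULL;			// ...leaving the queue empty
 *		irq_enable();
 *
 *		while (list) {			// invoke outside the critical
 *			next = list->next;	// section; new callbacks can
 *			list->func(list);	// be queued concurrently
 *			list = next;
 *		}
 *	}
 */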

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
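
/*
 * Usage sketch (not part of this file): the classic RCU update pattern
 * built on synchronize_sched().  The names below (struct foo, gbl_foo,
 * foo_lock, foo_update) are hypothetical, and error handling is omitted:
 *
 *	struct foo { int a; };
 *	static struct foo __rcu *gbl_foo;
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_update(int new_a)
 *	{
 *		struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		struct foo *old_fp;
 *
 *		new_fp->a = new_a;
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(gbl_foo,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_sched();	// wait for pre-existing readers
 *		kfree(old_fp);		// now safe to free the old version
 *	}
 */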

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
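
/*
 * Illustration (not part of this file): __call_rcu() above relies on the
 * tail-pointer idiom, which gives O(1) enqueue on a NULL-terminated
 * singly linked list.  A minimal sketch with hypothetical names (node,
 * queue, enqueue):
 *
 *	struct node { struct node *next; };
 *	struct queue {
 *		struct node *head;	// first element, or NULL if empty
 *		struct node **tail;	// &head when empty, else &last->next
 *	};
 *
 *	static void enqueue(struct queue *q, struct node *n)
 *	{
 *		n->next = NULL;
 *		*q->tail = n;		// link n after the current last node
 *		q->tail = &n->next;	// tail now addresses n's next pointer
 *	}
 */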

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
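
/*
 * Usage sketch (not part of this file): posting an asynchronous callback
 * with call_rcu_sched().  The reclaim function recovers its enclosing
 * structure with container_of().  The names (struct foo, foo_reclaim, fp)
 * are hypothetical:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	// ... after making fp unreachable to new readers:
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 */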