/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>

/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;
static void invoke_rcu_kthread(void);

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
	if (--rcu_dynticks_nesting == 0)
		rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
	rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */
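
/*
 * Illustrative sketch (not code from this file; the call sequence is an
 * assumption about typical dynticks usage): because the nesting count
 * pairs rcu_exit_nohz() with rcu_enter_nohz(), an interrupt taken while
 * dynticks-idle momentarily leaves the extended quiescent state.  The
 * value of rcu_dynticks_nesting is shown on the right:
 *
 *	rcu_enter_nohz();	idle entry:  1 -> 0, quiescent state reported
 *	rcu_exit_nohz();	irq entry:   0 -> 1, no longer idle
 *	rcu_enter_nohz();	irq exit:    1 -> 0, idle again
 */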

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		local_irq_restore(flags);
		return 1;
	}
	local_irq_restore(flags);

	return 0;
}
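
/*
 * Sketch of the callback list assumed by rcu_qsctr_help() (illustrative
 * only; the field names are those of struct rcu_ctrlblk): ->rcucblist
 * heads a single NULL-terminated list, ->donetail points to the ->next
 * pointer of the last callback whose grace period has elapsed, and
 * ->curtail points to the ->next pointer of the last callback queued:
 *
 *	rcucblist -> done -> done -> pending -> pending -> NULL
 *	                      ^                    ^
 *	                   donetail             curtail
 *
 * Advancing ->donetail to ->curtail at a quiescent state therefore
 * marks every callback queued before that point as ready to invoke.
 */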

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short-circuiting.
 */
void rcu_sched_qs(int cpu)
{
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) &&
	     !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail)
		return;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		list->func(list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
}
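
/*
 * Sketch of the extraction above (illustrative only): starting from
 *
 *	rcucblist -> A -> B -> C -> NULL
 *	                  ^         ^
 *	               donetail  curtail
 *
 * the ready sublist A->B is detached onto the local "list" (terminated
 * by writing NULL through ->donetail), ->rcucblist is left pointing at
 * the still-pending C, and ->donetail is reset to &->rcucblist.  The
 * detached callbacks are then invoked with irqs enabled, so callback
 * execution does not add to irq-disabled latency.
 */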

/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that was used previously for this purpose.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work) {
			rcu_process_callbacks(&rcu_sched_ctrlblk);
			rcu_process_callbacks(&rcu_bh_ctrlblk);
			rcu_preempt_process_callbacks();
		}
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}

/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	have_rcu_kthread_work = 1;
	wake_up(&rcu_kthread_wq);
	local_irq_restore(flags);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
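
/*
 * Illustrative caller of synchronize_sched() (an assumption, not code
 * from this file; struct foo and foo_remove() are hypothetical): the
 * classic update-side pattern unlinks an element, waits for pre-existing
 * readers, then frees it.  On this UP build the wait degenerates to the
 * cond_resched() above.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *	};
 *
 *	void foo_remove(struct foo *p)
 *	{
 *		list_del_rcu(&p->list);
 *		synchronize_sched();
 *		kfree(p);
 *	}
 */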

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
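
/*
 * Enqueue sketch for __call_rcu() (illustrative only): with ->curtail
 * pointing at the ->next field of the last queued callback, the two
 * assignments above append the new element in O(1):
 *
 *	before:  ... -> last -> NULL          curtail == &last->next
 *	after:   ... -> last -> head -> NULL  curtail == &head->next
 */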

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
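
/*
 * Illustrative caller of call_rcu_sched() (an assumption, not code from
 * this file; struct foo, foo_reclaim() and foo_retire() are
 * hypothetical): queue an asynchronous callback that frees its enclosing
 * structure once a grace period has elapsed, using container_of() to
 * recover the structure from the embedded rcu_head.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *p = container_of(head, struct foo, rcu);
 *
 *		kfree(p);
 *	}
 *
 *	void foo_retire(struct foo *p)
 *	{
 *		call_rcu_sched(&p->rcu, foo_reclaim);
 *	}
 */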

void rcu_barrier_bh(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_bh(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
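
/*
 * Illustrative caller of rcu_barrier_sched() (an assumption, not code
 * from this file; foo_exit() and foo_stop_posting_callbacks() are
 * hypothetical): a module exit handler must wait for all of its
 * outstanding callbacks before the functions they point to go away.
 * Because callbacks are invoked in queue order here, waiting for the
 * newly queued barrier callback also waits for all of its predecessors.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier_sched();
 *	}
 */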

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);