/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
	struct task_struct	*thread;	/* stopper thread */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(struct cpu_stopper *stopper,
				struct cpu_stop_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
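
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a minimal stop_one_cpu() caller.  The callback name and use
 * case are hypothetical; the callback runs with preemption disabled
 * on the target cpu and must not sleep.
 *
 *	static int reset_counters_fn(void *arg)
 *	{
 *		// runs on the target cpu, all other tasks preempted
 *		return 0;
 *	}
 *
 *	err = stop_one_cpu(2, reset_counters_fn, NULL);
 *	// err is -ENOENT if cpu 2 was offline, else the callback's return
 */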

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
}
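
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): because stop_one_cpu_nowait() returns before @fn runs,
 * @work_buf must stay valid until the stopper consumes it.  A static
 * or per-cpu buffer is the usual answer; the names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	stop_one_cpu_nowait(cpu, kick_cpu_fn, NULL,
 *			    &per_cpu(kick_work, cpu));
 */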

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_work *work;
	struct cpu_stop_done done;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = &done;
	}
	cpu_stop_init_done(&done, cpumask_weight(cpumask));

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
	preempt_enable();

	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if every invocation of @fn
 * returned 0, or one of the non-zero return values if any invocation
 * returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
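
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): because stop_cpus() calls are serialized, @fn may safely
 * spin until every target cpu has entered it, e.g. for a system-wide
 * rendezvous.  The callback and counter are hypothetical and assume
 * @cpumask == cpu_online_mask.
 *
 *	static atomic_t entered = ATOMIC_INIT(0);
 *
 *	static int rendezvous_fn(void *arg)
 *	{
 *		atomic_inc(&entered);
 *		while (atomic_read(&entered) < num_online_cpus())
 *			cpu_relax();
 *		// all target cpus are now spinning here simultaneously
 *		return 0;
 *	}
 */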

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if every invocation of @fn returned 0, or one
 * of the non-zero return values if any invocation returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
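
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a caller that must not block on stop_cpus_mutex can poll
 * with try_stop_cpus().  The retry policy shown is hypothetical.
 *
 *	int ret;
 *
 *	do {
 *		ret = try_stop_cpus(cpu_online_mask, some_fn, NULL);
 *	} while (ret == -EAGAIN);
 */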

static int cpu_stopper_thread(void *data)
{
	struct cpu_stopper *stopper = data;
	struct cpu_stop_work *work;
	int ret;

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		__set_current_state(TASK_RUNNING);

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
	} else
		schedule();

	goto repeat;
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		BUG_ON(stopper->thread || stopper->enabled ||
		       !list_empty(&stopper->works));
		p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
				   cpu);
		if (IS_ERR(p))
			return notifier_from_errno(PTR_ERR(p));
		get_task_struct(p);
		kthread_bind(p, cpu);
		sched_set_stop_task(cpu, p);
		stopper->thread = p;
		break;

	case CPU_ONLINE:
		/* strictly unnecessary, as first user will wake it */
		wake_up_process(stopper->thread);
		/* mark enabled */
		spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
		spin_unlock_irq(&stopper->lock);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
	{
		struct cpu_stop_work *work;

		sched_set_stop_task(cpu, NULL);
		/* kill the stopper */
		kthread_stop(stopper->thread);
		/* drain remaining works */
		spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
		spin_unlock_irq(&stopper->lock);
		/* release the stopper */
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
		break;
	}
#endif
	}

	return NOTIFY_OK;
}

/*
 * Give it a higher priority so that cpu stopper is available to other
 * cpu notifiers.  It currently shares the same priority as sched
 * migration_notifier.
 */
static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
	.notifier_call	= cpu_stop_cpu_callback,
	.priority	= 10,
};

static int __init cpu_stop_init(void)
{
	void *bcpu = (void *)(long)smp_processor_id();
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	/* start one for the boot cpu */
	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
				    bcpu);
	BUG_ON(err != NOTIFY_OK);
	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
	register_cpu_notifier(&cpu_stop_cpu_notifier);

	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};

struct stop_machine_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum stopmachine_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct stop_machine_data *smdata,
		      enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&smdata->thread_ack, smdata->num_threads);
	smp_wmb();
	smdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct stop_machine_data *smdata)
{
	if (atomic_dec_and_test(&smdata->thread_ack))
		set_state(smdata, smdata->state + 1);
}
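
/*
 * Illustrative note (editor's addition, not part of the original
 * file): with two online cpus, num_threads == 2 and a run proceeds in
 * lockstep roughly as:
 *
 *	set_state(PREPARE)		thread_ack = 2
 *	cpu0 acks PREPARE		thread_ack = 1
 *	cpu1 acks PREPARE		thread_ack = 0 -> DISABLE_IRQ
 *	both disable irqs and ack	-> RUN
 *	active cpu(s) run ->fn(), ack	-> EXIT
 *	both leave the loop, irqs back on
 *
 * No thread advances past a state until every thread has acked it.
 */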

/* This is the cpu_stop function which stops the CPU. */
static int stop_machine_cpu_stop(void *data)
{
	struct stop_machine_data *smdata = data;
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	int cpu = smp_processor_id(), err = 0;
	bool is_active;

	if (!smdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (smdata->state != curstate) {
			curstate = smdata->state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				if (is_active)
					err = smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state(smdata);
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
	return err;
}

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .num_threads = num_online_cpus(),
					    .active_cpus = cpus };

	/* Set the initial state and stop all online cpus. */
	set_state(&smdata, STOPMACHINE_PREPARE);
	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
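
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the classic stop_machine() use is mutating state that must
 * never be observed half-updated.  With every online cpu spinning
 * with irqs off, @fn runs as if the machine were uniprocessor.  The
 * callback here is hypothetical.
 *
 *	static int patch_text_fn(void *addr)
 *	{
 *		// no other cpu executes or takes interrupts here
 *		return 0;
 *	}
 *
 *	// NULL @cpus: @fn runs on the first online cpu
 *	ret = stop_machine(patch_text_fn, addr, NULL);
 */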

#endif /* CONFIG_STOP_MACHINE */