/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
    atomic_t nr_todo; /* nr left to execute */
    bool executed; /* actually executed? */
    int ret; /* collected return value */
    struct completion completion; /* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
    spinlock_t lock;
    bool enabled; /* is this stopper enabled? */
    struct list_head works; /* list of pending works */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
    memset(done, 0, sizeof(*done));
    atomic_set(&done->nr_todo, nr_todo);
    init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
    if (done) {
        if (executed)
            done->executed = true;
        if (atomic_dec_and_test(&done->nr_todo))
            complete(&done->completion);
    }
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
    struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
    unsigned long flags;

    spin_lock_irqsave(&stopper->lock, flags);

    if (stopper->enabled) {
        list_add_tail(&work->list, &stopper->works);
        wake_up_process(p);
    } else
        cpu_stop_signal_done(work->done, false);

    spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority, preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
    struct cpu_stop_done done;
    struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

    cpu_stop_init_done(&done, 1);
    cpu_stop_queue_work(cpu, &work);
    wait_for_completion(&done.completion);
    return done.executed ? done.ret : -ENOENT;
}
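
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how stop_one_cpu() might be called. The callback and the counter it
 * resets are hypothetical.
 */
#if 0
static unsigned int demo_counter;       /* hypothetical */

static int flush_remote_counter(void *arg)
{
    /* Runs on the target cpu at stop priority; must not sleep. */
    unsigned int *cnt = arg;

    *cnt = 0;
    return 0;
}

static void demo_stop_one(void)
{
    /* -ENOENT if cpu 1 is offline, else flush_remote_counter()'s 0. */
    int ret = stop_one_cpu(1, flush_remote_counter, &demo_counter);

    pr_info("stop_one_cpu: %d\n", ret);
}
#endif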

/* This controls the threads on each CPU. */
enum multi_stop_state {
    /* Dummy starting state for thread. */
    MULTI_STOP_NONE,
    /* Awaiting everyone to be scheduled. */
    MULTI_STOP_PREPARE,
    /* Disable interrupts. */
    MULTI_STOP_DISABLE_IRQ,
    /* Run the function */
    MULTI_STOP_RUN,
    /* Exit */
    MULTI_STOP_EXIT,
};

struct multi_stop_data {
    int (*fn)(void *);
    void *data;
    /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
    unsigned int num_threads;
    const struct cpumask *active_cpus;

    enum multi_stop_state state;
    atomic_t thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
              enum multi_stop_state newstate)
{
    /* Reset ack counter. */
    atomic_set(&msdata->thread_ack, msdata->num_threads);
    smp_wmb();
    msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
    if (atomic_dec_and_test(&msdata->thread_ack))
        set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
    struct multi_stop_data *msdata = data;
    enum multi_stop_state curstate = MULTI_STOP_NONE;
    int cpu = smp_processor_id(), err = 0;
    unsigned long flags;
    bool is_active;

    /*
     * When called from stop_machine_from_inactive_cpu(), irq might
     * already be disabled. Save the state and restore it on exit.
     */
    local_save_flags(flags);

    if (!msdata->active_cpus)
        is_active = cpu == cpumask_first(cpu_online_mask);
    else
        is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

    /* Simple state machine */
    do {
        /* Chill out and ensure we re-read multi_stop_state. */
        cpu_relax();
        if (msdata->state != curstate) {
            curstate = msdata->state;
            switch (curstate) {
            case MULTI_STOP_DISABLE_IRQ:
                local_irq_disable();
                hard_irq_disable();
                break;
            case MULTI_STOP_RUN:
                if (is_active)
                    err = msdata->fn(msdata->data);
                break;
            default:
                break;
            }
            ack_state(msdata);
        }
    } while (curstate != MULTI_STOP_EXIT);

    local_irq_restore(flags);
    return err;
}
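
/*
 * Editor's note (not part of the original file): for a given @msdata the
 * threads above advance in lockstep; the last thread to ack a state in
 * ack_state() moves everyone to the next one:
 *
 *   MULTI_STOP_PREPARE     - all stopper threads are running and spinning
 *   MULTI_STOP_DISABLE_IRQ - each cpu disables hard interrupts, then acks
 *   MULTI_STOP_RUN         - cpus in @active_cpus run @fn; the rest spin
 *   MULTI_STOP_EXIT        - the loop ends and irq state is restored
 *
 * The smp_wmb() in set_state() makes sure a thread that sees the new
 * ->state also sees the freshly reset ->thread_ack counter.
 */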

struct irq_cpu_stop_queue_work_info {
    int cpu1;
    int cpu2;
    struct cpu_stop_work *work1;
    struct cpu_stop_work *work2;
};

/*
 * This function is always run with irqs and preemption disabled.
 * This guarantees that both work1 and work2 get queued, before
 * our local migrate thread gets the chance to preempt us.
 */
static void irq_cpu_stop_queue_work(void *arg)
{
    struct irq_cpu_stop_queue_work_info *info = arg;

    cpu_stop_queue_work(info->cpu1, info->work1);
    cpu_stop_queue_work(info->cpu2, info->work2);
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
    struct cpu_stop_done done;
    struct cpu_stop_work work1, work2;
    struct irq_cpu_stop_queue_work_info call_args;
    struct multi_stop_data msdata;

    preempt_disable();
    msdata = (struct multi_stop_data){
        .fn = fn,
        .data = arg,
        .num_threads = 2,
        .active_cpus = cpumask_of(cpu1),
    };

    work1 = work2 = (struct cpu_stop_work){
        .fn = multi_cpu_stop,
        .arg = &msdata,
        .done = &done
    };

    call_args = (struct irq_cpu_stop_queue_work_info){
        .cpu1 = cpu1,
        .cpu2 = cpu2,
        .work1 = &work1,
        .work2 = &work2,
    };

    cpu_stop_init_done(&done, 2);
    set_state(&msdata, MULTI_STOP_PREPARE);

    /*
     * If we observe both CPUs active we know _cpu_down() cannot yet have
     * queued its stop_machine works and therefore ours will get executed
     * first. Or it's not either one of our CPUs that's getting unplugged,
     * in which case we don't care.
     *
     * This relies on the stopper workqueues being FIFO.
     */
    if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
        preempt_enable();
        return -ENOENT;
    }

    lg_local_lock(&stop_cpus_lock);
    /*
     * Queuing needs to be done by the lowest numbered CPU, to ensure
     * that works are always queued in the same order on every CPU.
     * This prevents deadlocks.
     */
    smp_call_function_single(min(cpu1, cpu2),
                 &irq_cpu_stop_queue_work,
                 &call_args, 1);
    lg_local_unlock(&stop_cpus_lock);
    preempt_enable();

    wait_for_completion(&done.completion);

    return done.executed ? done.ret : -ENOENT;
}
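
/*
 * Illustrative only (not part of the original file): the in-tree caller
 * of stop_two_cpus() is the scheduler's migrate_swap() path (NUMA task
 * swapping). A hypothetical direct use, exchanging two values with both
 * cpus held:
 */
#if 0
struct swap_args {              /* hypothetical */
    int a, b;
};

static int do_swap(void *arg)
{
    /* Runs on cpu1 only, while cpu2 spins in multi_cpu_stop(). */
    struct swap_args *sa = arg;
    int tmp = sa->a;

    sa->a = sa->b;
    sa->b = tmp;
    return 0;
}

static void demo_swap(void)
{
    struct swap_args sa = { .a = 1, .b = 2 };

    if (stop_two_cpus(0, 1, do_swap, &sa))
        pr_warn("one of the cpus was going offline\n");
}
#endif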

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
            struct cpu_stop_work *work_buf)
{
    *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
    cpu_stop_queue_work(cpu, work_buf);
}
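
/*
 * Illustrative only (not part of the original file): the scheduler calls
 * stop_one_cpu_nowait() from atomic context for active load balancing,
 * keeping one work buffer per runqueue. A hypothetical equivalent with a
 * per-cpu buffer:
 */
#if 0
static DEFINE_PER_CPU(struct cpu_stop_work, demo_work); /* hypothetical */

static int push_one_task(void *arg)
{
    /* Would push a task off the stopped cpu. */
    return 0;
}

static void demo_nowait(unsigned int cpu)
{
    /*
     * Returns immediately after queueing; safe with preemption or
     * irqs disabled. demo_work for @cpu must stay untouched until
     * push_one_task() has started.
     */
    stop_one_cpu_nowait(cpu, push_one_task, NULL, &per_cpu(demo_work, cpu));
}
#endif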

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
                 cpu_stop_fn_t fn, void *arg,
                 struct cpu_stop_done *done)
{
    struct cpu_stop_work *work;
    unsigned int cpu;

    /* initialize works and done */
    for_each_cpu(cpu, cpumask) {
        work = &per_cpu(stop_cpus_work, cpu);
        work->fn = fn;
        work->arg = arg;
        work->done = done;
    }

    /*
     * Disable preemption while queueing to avoid getting
     * preempted by a stopper which might wait for other stoppers
     * to enter @fn which can lead to deadlock.
     */
    lg_global_lock(&stop_cpus_lock);
    for_each_cpu(cpu, cpumask)
        cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
    lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
               cpu_stop_fn_t fn, void *arg)
{
    struct cpu_stop_done done;

    cpu_stop_init_done(&done, cpumask_weight(cpumask));
    queue_stop_cpus_work(cpumask, fn, arg, &done);
    wait_for_completion(&done.completion);
    return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
    int ret;

    /* static works are used, process one request at a time */
    mutex_lock(&stop_cpus_mutex);
    ret = __stop_cpus(cpumask, fn, arg);
    mutex_unlock(&stop_cpus_mutex);
    return ret;
}
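
/*
 * Illustrative only (not part of the original file): a hypothetical
 * stop_cpus() call running a callback on cpus 0 and 1 simultaneously.
 */
#if 0
static int sync_state(void *arg)        /* hypothetical */
{
    /* Runs concurrently on every online cpu of the passed mask. */
    return 0;
}

static int demo_stop_cpus(void)
{
    cpumask_var_t mask;
    int ret;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;

    cpumask_set_cpu(0, mask);
    cpumask_set_cpu(1, mask);

    /* Sleeps until sync_state() has finished on all online cpus in mask. */
    ret = stop_cpus(mask, sync_state, NULL);

    free_cpumask_var(mask);
    return ret;
}
#endif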

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
    int ret;

    /* static works are used, process one request at a time */
    if (!mutex_trylock(&stop_cpus_mutex))
        return -EAGAIN;
    ret = __stop_cpus(cpumask, fn, arg);
    mutex_unlock(&stop_cpus_mutex);
    return ret;
}
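
/*
 * Illustrative only (not part of the original file): since try_stop_cpus()
 * returns -EAGAIN instead of sleeping on stop_cpus_mutex, a caller can
 * retry on its own terms; the expedited RCU grace-period code historically
 * looped this way.
 */
#if 0
static int demo_try_stop(cpu_stop_fn_t fn, void *arg)
{
    int ret;

    /* Retry while some other caller holds stop_cpus_mutex. */
    while ((ret = try_stop_cpus(cpu_online_mask, fn, arg)) == -EAGAIN)
        cond_resched();
    return ret;
}
#endif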

static int cpu_stop_should_run(unsigned int cpu)
{
    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
    unsigned long flags;
    int run;

    spin_lock_irqsave(&stopper->lock, flags);
    run = !list_empty(&stopper->works);
    spin_unlock_irqrestore(&stopper->lock, flags);
    return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
    struct cpu_stop_work *work;
    int ret;

repeat:
    work = NULL;
    spin_lock_irq(&stopper->lock);
    if (!list_empty(&stopper->works)) {
        work = list_first_entry(&stopper->works,
                    struct cpu_stop_work, list);
        list_del_init(&work->list);
    }
    spin_unlock_irq(&stopper->lock);

    if (work) {
        cpu_stop_fn_t fn = work->fn;
        void *arg = work->arg;
        struct cpu_stop_done *done = work->done;
        char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

        /* cpu stop callbacks are not allowed to sleep */
        preempt_disable();

        ret = fn(arg);
        if (ret)
            done->ret = ret;

        /* restore preemption and check it's still balanced */
        preempt_enable();
        WARN_ONCE(preempt_count(),
              "cpu_stop: %s(%p) leaked preempt count\n",
              kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
                      ksym_buf), arg);

        cpu_stop_signal_done(done, true);
        goto repeat;
    }
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
    sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
    struct cpu_stop_work *work;
    unsigned long flags;

    /* drain remaining works */
    spin_lock_irqsave(&stopper->lock, flags);
    list_for_each_entry(work, &stopper->works, list)
        cpu_stop_signal_done(work->done, false);
    stopper->enabled = false;
    spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

    spin_lock_irq(&stopper->lock);
    stopper->enabled = true;
    spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
    .store = &cpu_stopper_task,
    .thread_should_run = cpu_stop_should_run,
    .thread_fn = cpu_stopper_thread,
    .thread_comm = "migration/%u",
    .create = cpu_stop_create,
    .setup = cpu_stop_unpark,
    .park = cpu_stop_park,
    .pre_unpark = cpu_stop_unpark,
    .selfparking = true,
};

static int __init cpu_stop_init(void)
{
    unsigned int cpu;

    for_each_possible_cpu(cpu) {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        spin_lock_init(&stopper->lock);
        INIT_LIST_HEAD(&stopper->works);
    }

    BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
    stop_machine_initialized = true;
    return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
    struct multi_stop_data msdata = {
        .fn = fn,
        .data = data,
        .num_threads = num_online_cpus(),
        .active_cpus = cpus,
    };

    if (!stop_machine_initialized) {
        /*
         * Handle the case where stop_machine() is called
         * early in boot, before the stopper infrastructure
         * has been initialized.
         */
        unsigned long flags;
        int ret;

        WARN_ON_ONCE(msdata.num_threads != 1);

        local_irq_save(flags);
        hard_irq_disable();
        ret = (*fn)(data);
        local_irq_restore(flags);

        return ret;
    }

    /* Set the initial state and stop all online cpus. */
    set_state(&msdata, MULTI_STOP_PREPARE);
    return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
    int ret;

    /* No CPUs can come up or down during this. */
    get_online_cpus();
    ret = __stop_machine(fn, data, cpus);
    put_online_cpus();
    return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
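
/*
 * Illustrative only (not part of the original file): the canonical user
 * is CPU hotplug's _cpu_down(), already referenced in the comment in
 * stop_two_cpus() above. A hypothetical direct use, applying a change
 * that no other cpu may observe half-done:
 */
#if 0
static int apply_update(void *arg)
{
    /*
     * Runs while every other online cpu spins in multi_cpu_stop()
     * with hard interrupts disabled; no other code runs concurrently.
     */
    return 0;
}

static int demo_stop_machine(void)
{
    /* NULL mask: @fn runs on the first online cpu only. May sleep. */
    return stop_machine(apply_update, NULL, NULL);
}
#endif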

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
                  const struct cpumask *cpus)
{
    struct multi_stop_data msdata = { .fn = fn, .data = data,
                        .active_cpus = cpus };
    struct cpu_stop_done done;
    int ret;

    /* Local CPU must be inactive and CPU hotplug in progress. */
    BUG_ON(cpu_active(raw_smp_processor_id()));
    msdata.num_threads = num_active_cpus() + 1; /* +1 for local */

    /* No proper task established and can't sleep - busy wait for lock. */
    while (!mutex_trylock(&stop_cpus_mutex))
        cpu_relax();

    /* Schedule work on other CPUs and execute directly on the local CPU */
    set_state(&msdata, MULTI_STOP_PREPARE);
    cpu_stop_init_done(&done, num_active_cpus());
    queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
                 &done);
    ret = multi_cpu_stop(&msdata);

    /* Busy wait for completion. */
    while (!completion_done(&done.completion))
        cpu_relax();

    mutex_unlock(&stop_cpus_mutex);
    return ret ?: done.ret;
}
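
/*
 * Editor's note (not part of the original file): to the best of my
 * knowledge the intended user of this variant is the x86 MTRR code,
 * which must resynchronize MTRR state across cpus while a cpu is being
 * brought up and is not yet active (and so cannot sleep).
 */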

#endif /* CONFIG_STOP_MACHINE */
