kernel/smp.c

/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
    struct list_head queue;
    raw_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
    {
        .queue = LIST_HEAD_INIT(call_function.queue),
        .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
    };

enum {
    CSD_FLAG_LOCK = 0x01,
};

struct call_function_data {
    struct call_single_data csd;
    atomic_t refs;
    cpumask_var_t cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
    struct list_head list;
    raw_spinlock_t lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    long cpu = (long)hcpu;
    struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                cpu_to_node(cpu)))
            return notifier_from_errno(-ENOMEM);
        break;

#ifdef CONFIG_HOTPLUG_CPU
    case CPU_UP_CANCELED:
    case CPU_UP_CANCELED_FROZEN:

    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        free_cpumask_var(cfd->cpumask);
        break;
#endif
    };

    return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
    .notifier_call = hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
    void *cpu = (void *)(long)smp_processor_id();
    int i;

    for_each_possible_cpu(i) {
        struct call_single_queue *q = &per_cpu(call_single_queue, i);

        raw_spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->list);
    }

    hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
    register_cpu_notifier(&hotplug_cfd_notifier);

    return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
    while (data->flags & CSD_FLAG_LOCK)
        cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
    csd_lock_wait(data);
    data->flags = CSD_FLAG_LOCK;

    /*
     * prevent CPU from reordering the above assignment
     * to ->flags with any subsequent assignments to other
     * fields of the specified call_single_data structure:
     */
    smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
    WARN_ON(!(data->flags & CSD_FLAG_LOCK));

    /*
     * ensure we're all done before releasing data:
     */
    smp_mb();

    data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
    struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
    unsigned long flags;
    int ipi;

    raw_spin_lock_irqsave(&dst->lock, flags);
    ipi = list_empty(&dst->list);
    list_add_tail(&data->list, &dst->list);
    raw_spin_unlock_irqrestore(&dst->lock, flags);

    /*
     * The list addition should be visible before we send the IPI: by the
     * time the handler locks the list to pull the entry off it, the
     * normal cache coherency rules implied by spinlocks have made the
     * addition visible.
     *
     * If IPIs can be delivered out of order with respect to the cache
     * coherency protocol on an architecture, sufficient synchronisation
     * should be added to arch code to make it appear to obey cache
     * coherency WRT locking and barrier primitives. Generic code isn't
     * really equipped to do the right thing...
     */
    if (ipi)
        arch_send_call_function_single_ipi(cpu);

    if (wait)
        csd_lock_wait(data);
}
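
/*
 * Illustrative sketch only (not part of this file): how a caller is
 * expected to drive csd_lock() and generic_exec_single() for a
 * synchronous cross-call. This mirrors the slow path of
 * smp_call_function_single() below; example_sync_call() is a
 * hypothetical name.
 */
#if 0
static void example_sync_call(int cpu, smp_call_func_t fn, void *arg)
{
    struct call_single_data d = { .flags = 0 };

    csd_lock(&d);           /* claim the csd, order later stores */
    d.func = fn;
    d.info = arg;
    generic_exec_single(cpu, &d, 1);    /* queue, IPI, then wait until
                                         * the handler csd_unlock()s it */
}
#endif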

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
    struct call_function_data *data;
    int cpu = smp_processor_id();

    /*
     * Shouldn't receive this interrupt on a cpu that is not yet online.
     */
    WARN_ON_ONCE(!cpu_online(cpu));

    /*
     * Ensure entry is visible on call_function_queue after we have
     * entered the IPI. See comment in smp_call_function_many.
     * If we don't have this, then we may miss an entry on the list
     * and never get another IPI to process it.
     */
    smp_mb();

    /*
     * It's ok to use list_for_each_entry_rcu() here even though we may
     * delete 'data', since list_del_rcu() doesn't clear ->next
     */
    list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
        int refs;
        void (*func) (void *info);

        /*
         * Since we walk the list without any locks, we might
         * see an entry that was completed, removed from the
         * list and is in the process of being reused.
         *
         * We must check that the cpu is in the cpumask before
         * checking the refs, and both must be set before
         * executing the callback on this cpu.
         */

        if (!cpumask_test_cpu(cpu, data->cpumask))
            continue;

        smp_rmb();

        if (atomic_read(&data->refs) == 0)
            continue;

        func = data->csd.func; /* for later warn */
        data->csd.func(data->csd.info);

        /*
         * If this cpu's bit is no longer set in the mask, then func
         * enabled interrupts, we took another smp call function
         * interrupt, and executed the function twice on this cpu.
         * In theory that nested copy decremented refs.
         */
        if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
            WARN(1, "%pS enabled interrupts and double executed\n",
                 func);
            continue;
        }

        refs = atomic_dec_return(&data->refs);
        WARN_ON(refs < 0);

        if (refs)
            continue;

        WARN_ON(!cpumask_empty(data->cpumask));

        raw_spin_lock(&call_function.lock);
        list_del_rcu(&data->csd.list);
        raw_spin_unlock(&call_function.lock);

        csd_unlock(&data->csd);
    }

}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
    struct call_single_queue *q = &__get_cpu_var(call_single_queue);
    unsigned int data_flags;
    LIST_HEAD(list);

    /*
     * Shouldn't receive this interrupt on a cpu that is not yet online.
     */
    WARN_ON_ONCE(!cpu_online(smp_processor_id()));

    raw_spin_lock(&q->lock);
    list_replace_init(&q->list, &list);
    raw_spin_unlock(&q->lock);

    while (!list_empty(&list)) {
        struct call_single_data *data;

        data = list_entry(list.next, struct call_single_data, list);
        list_del(&data->list);

        /*
         * 'data' can be invalid after this call if flags == 0
         * (when called through generic_exec_single()),
         * so save them away before making the call:
         */
        data_flags = data->flags;

        data->func(data->info);

        /*
         * Unlocked CSDs are valid through generic_exec_single():
         */
        if (data_flags & CSD_FLAG_LOCK)
            csd_unlock(data);
    }
}
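
/*
 * Illustrative sketch only (not part of this file): architectures wire
 * their two call-function IPI vectors to the handlers above, typically
 * bracketed by irq_enter()/irq_exit(). The entry-point names below are
 * hypothetical; the real ones live in arch code.
 */
#if 0
void example_call_function_ipi(void)            /* "multi" IPI vector */
{
    irq_enter();
    generic_smp_call_function_interrupt();
    irq_exit();
}

void example_call_function_single_ipi(void)     /* "single" IPI vector */
{
    irq_enter();
    generic_smp_call_function_single_interrupt();
    irq_exit();
}
#endif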

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                 int wait)
{
    struct call_single_data d = {
        .flags = 0,
    };
    unsigned long flags;
    int this_cpu;
    int err = 0;

    /*
     * prevent preemption and reschedule on another processor,
     * as well as CPU removal
     */
    this_cpu = get_cpu();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow CPUs that are not yet online though, as no one else can
     * send an smp call function interrupt to this cpu and as such
     * deadlocks can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
             && !oops_in_progress);

    if (cpu == this_cpu) {
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
    } else {
        if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
            struct call_single_data *data = &d;

            if (!wait)
                data = &__get_cpu_var(csd_data);

            csd_lock(data);

            data->func = func;
            data->info = info;
            generic_exec_single(cpu, data, wait);
        } else {
            err = -ENXIO; /* CPU not online */
        }
    }

    put_cpu();

    return err;
}
EXPORT_SYMBOL(smp_call_function_single);
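
/*
 * Illustrative usage only (not part of this file): read a per-CPU
 * quantity on one specific CPU with a synchronous single call. The
 * example_* names and struct are hypothetical.
 */
#if 0
struct example_arg {
    unsigned long value;
};

static void example_read_on_cpu(void *info)     /* runs in IPI context */
{
    struct example_arg *arg = info;

    arg->value = smp_processor_id();    /* stand-in for a per-CPU read */
}

static int example_query_cpu(int cpu)
{
    struct example_arg arg = { 0 };
    int err;

    /* wait=1: on return, arg.value has been filled in on @cpu */
    err = smp_call_function_single(cpu, example_read_on_cpu, &arg, 1);
    return err ? err : (int)arg.value;
}
#endif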

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 * 1) current cpu if in @mask
 * 2) any cpu of current node if in @mask
 * 3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
              smp_call_func_t func, void *info, int wait)
{
    unsigned int cpu;
    const struct cpumask *nodemask;
    int ret;

    /* Try for same CPU (cheapest) */
    cpu = get_cpu();
    if (cpumask_test_cpu(cpu, mask))
        goto call;

    /* Try for same node. */
    nodemask = cpumask_of_node(cpu_to_node(cpu));
    for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
         cpu = cpumask_next_and(cpu, nodemask, mask)) {
        if (cpu_online(cpu))
            goto call;
    }

    /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
    cpu = cpumask_any_and(mask, cpu_online_mask);
call:
    ret = smp_call_function_single(cpu, func, info, wait);
    put_cpu();
    return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
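
/*
 * Illustrative usage only (not part of this file): run a callback on
 * whichever CPU of a given node is cheapest to reach, following the
 * selection preference documented above. The example_* names are
 * hypothetical.
 */
#if 0
static void example_poke(void *info)
{
    /* fast, non-blocking work on behalf of the caller */
}

static int example_poke_node(int node)
{
    /* prefers the current CPU, then a CPU on @node, then any online CPU */
    return smp_call_function_any(cpumask_of_node(node), example_poke,
                                 NULL, 1);
}
#endif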

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and set-up data structure
 * @wait: If true, wait until function has completed on the specified CPU.
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
                int wait)
{
    unsigned int this_cpu;
    unsigned long flags;

    this_cpu = get_cpu();
    /*
     * Can deadlock when called with interrupts disabled.
     * We allow CPUs that are not yet online though, as no one else can
     * send an smp call function interrupt to this cpu and as such
     * deadlocks can't happen.
     */
    WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
             && !oops_in_progress);

    if (cpu == this_cpu) {
        local_irq_save(flags);
        data->func(data->info);
        local_irq_restore(flags);
    } else {
        csd_lock(data);
        generic_exec_single(cpu, data, wait);
    }
    put_cpu();
}
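
/*
 * Illustrative usage only (not part of this file): embed the
 * call_single_data in a longer-lived object so no allocation is needed
 * on the IPI path; the block layer uses a similar pattern for remote
 * request completion. struct example_work and its helpers are
 * hypothetical.
 */
#if 0
struct example_work {
    struct call_single_data csd;
    int payload;
};

static void example_work_fn(void *info)
{
    struct example_work *w = info;

    /* consume w->payload on the target CPU */
}

static void example_kick(struct example_work *w, int cpu)
{
    w->csd.flags = 0;
    w->csd.func = example_work_fn;
    w->csd.info = w;
    __smp_call_function_single(cpu, &w->csd, 0);    /* fire and forget */
}
#endif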

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 * on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                smp_call_func_t func, void *info, bool wait)
{
    struct call_function_data *data;
    unsigned long flags;
    int cpu, next_cpu, this_cpu = smp_processor_id();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow CPUs that are not yet online though, as no one else can
     * send an smp call function interrupt to this cpu and as such
     * deadlocks can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
             && !oops_in_progress && !early_boot_irqs_disabled);

    /* So, what's a CPU they want? Ignoring this one. */
    cpu = cpumask_first_and(mask, cpu_online_mask);
    if (cpu == this_cpu)
        cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

    /* No online cpus? We're done. */
    if (cpu >= nr_cpu_ids)
        return;

    /* Do we have another CPU which isn't us? */
    next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
    if (next_cpu == this_cpu)
        next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

    /* Fastpath: do that cpu by itself. */
    if (next_cpu >= nr_cpu_ids) {
        smp_call_function_single(cpu, func, info, wait);
        return;
    }

    data = &__get_cpu_var(cfd_data);
    csd_lock(&data->csd);
    BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

    data->csd.func = func;
    data->csd.info = info;
    cpumask_and(data->cpumask, mask, cpu_online_mask);
    cpumask_clear_cpu(this_cpu, data->cpumask);

    /*
     * To ensure the interrupt handler gets a complete view,
     * we order the cpumask and refs writes here and order the
     * reads of them in the interrupt handler. In addition we may
     * only clear our own cpu bit from the mask.
     */
    smp_wmb();

    atomic_set(&data->refs, cpumask_weight(data->cpumask));

    raw_spin_lock_irqsave(&call_function.lock, flags);
    /*
     * Place entry at the _HEAD_ of the list, so that any cpu still
     * observing the entry in generic_smp_call_function_interrupt()
     * will not miss any other list entries:
     */
    list_add_rcu(&data->csd.list, &call_function.queue);
    raw_spin_unlock_irqrestore(&call_function.lock, flags);

    /*
     * Make the list addition visible before sending the ipi.
     * (IPIs must obey or appear to obey normal Linux cache
     * coherency rules -- see comment in generic_exec_single).
     */
    smp_mb();

    /* Send a message to all CPUs in the map */
    arch_send_call_function_ipi_mask(data->cpumask);

    /* Optionally wait for the CPUs to complete */
    if (wait)
        csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
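
/*
 * Illustrative usage only (not part of this file): run a callback on
 * every online CPU in a caller-supplied mask, with preemption disabled
 * as required above. smp_call_function_many() skips the calling CPU,
 * so the caller handles it by hand. The example_* names are
 * hypothetical.
 */
#if 0
static void example_drain(void *info)
{
    /* fast, non-blocking per-CPU work */
}

static void example_drain_mask(const struct cpumask *mask)
{
    preempt_disable();
    smp_call_function_many(mask, example_drain, NULL, true);   /* others */
    if (cpumask_test_cpu(smp_processor_id(), mask))
        example_drain(NULL);                    /* current CPU by hand */
    preempt_enable();
}
#endif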

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 * on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
    preempt_disable();
    smp_call_function_many(cpu_online_mask, func, info, wait);
    preempt_enable();

    return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
    raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
    raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
    raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
    raw_spin_unlock_irq(&call_function.lock);
}
#endif /* USE_GENERIC_SMP_HELPERS */

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
    unsigned long flags;
    int ret = 0;

    preempt_disable();
    ret = smp_call_function(func, info, wait);
    local_irq_save(flags);
    func(info);
    local_irq_restore(flags);
    preempt_enable();
    return ret;
}
EXPORT_SYMBOL(on_each_cpu);
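
/*
 * Illustrative usage only (not part of this file): on_each_cpu() runs
 * the callback on every online CPU including the caller's, with
 * interrupts disabled locally. The example_* names are hypothetical.
 */
#if 0
static void example_flush(void *unused)
{
    /* e.g. flush a per-CPU cache or counter */
}

static void example_flush_all(void)
{
    /* wait=1: returns only after every CPU has run example_flush() */
    on_each_cpu(example_flush, NULL, 1);
}
#endif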