kernel/rcutree_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary. If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
    printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
    printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
           CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
    printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
    printk(KERN_INFO
           "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
    printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
    printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
    printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
    printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
    printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
    printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
    rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
    return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
    return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
    force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU. Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section. There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
    struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

    rdp->passed_quiesce_gpnum = rdp->gpnum;
    barrier();
    if (rdp->passed_quiesce == 0)
        trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
    rdp->passed_quiesce = 1;
    current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
    struct task_struct *t = current;
    unsigned long flags;
    struct rcu_data *rdp;
    struct rcu_node *rnp;

    if (t->rcu_read_lock_nesting > 0 &&
        (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

        /* Possibly blocking in an RCU read-side critical section. */
        rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
        t->rcu_blocked_node = rnp;

        /*
         * If this CPU has already checked in, then this task
         * will hold up the next grace period rather than the
         * current grace period. Queue the task accordingly.
         * If the task is queued for the current grace period
         * (i.e., this CPU has not yet passed through a quiescent
         * state for the current grace period), then as long
         * as that task remains queued, the current grace period
         * cannot end. Note that there is some uncertainty as
         * to exactly when the current grace period started.
         * We take a conservative approach, which can result
         * in unnecessarily waiting on tasks that started very
         * slightly after the current grace period began. C'est
         * la vie!!!
         *
         * But first, note that the current CPU must still be
         * on line!
         */
        WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
        WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
        if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
            list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
            rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
            if (rnp->boost_tasks != NULL)
                rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
        } else {
            list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
            if (rnp->qsmask & rdp->grpmask)
                rnp->gp_tasks = &t->rcu_node_entry;
        }
        trace_rcu_preempt_task(rdp->rsp->name,
                       t->pid,
                       (rnp->qsmask & rdp->grpmask)
                       ? rnp->gpnum
                       : rnp->gpnum + 1);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
    } else if (t->rcu_read_lock_nesting < 0 &&
           t->rcu_read_unlock_special) {

        /*
         * Complete exit from RCU read-side critical section on
         * behalf of preempted instance of __rcu_read_unlock().
         */
        rcu_read_unlock_special(t);
    }

    /*
     * Either we were not in an RCU read-side critical section to
     * begin with, or we have now recorded that critical section
     * globally. Either way, we can now note a quiescent state
     * for this CPU. Again, if we were in an RCU read-side critical
     * section, and if that critical section was blocking the current
     * grace period, then the fact that the task has been enqueued
     * means that we continue to block the current grace period.
     */
    local_irq_save(flags);
    rcu_preempt_qs(cpu);
    local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
    current->rcu_read_lock_nesting++;
    barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
    return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period. The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
    __releases(rnp->lock)
{
    unsigned long mask;
    struct rcu_node *rnp_p;

    if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        return; /* Still need more quiescent states! */
    }

    rnp_p = rnp->parent;
    if (rnp_p == NULL) {
        /*
         * Either there is only one rcu_node in the tree,
         * or tasks were kicked up to root rcu_node due to
         * CPUs going offline.
         */
        rcu_report_qs_rsp(&rcu_preempt_state, flags);
        return;
    }

    /* Report up the rest of the hierarchy. */
    mask = rnp->grpmask;
    raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
    raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
    rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
                         struct rcu_node *rnp)
{
    struct list_head *np;

    np = t->rcu_node_entry.next;
    if (np == &rnp->blkd_tasks)
        np = NULL;
    return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
    int empty;
    int empty_exp;
    int empty_exp_now;
    unsigned long flags;
    struct list_head *np;
#ifdef CONFIG_RCU_BOOST
    struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
    struct rcu_node *rnp;
    int special;

    /* NMI handlers cannot block and cannot safely manipulate state. */
    if (in_nmi())
        return;

    local_irq_save(flags);

    /*
     * If RCU core is waiting for this CPU to exit critical section,
     * let it know that we have done so.
     */
    special = t->rcu_read_unlock_special;
    if (special & RCU_READ_UNLOCK_NEED_QS) {
        rcu_preempt_qs(smp_processor_id());
    }

    /* Hardware IRQ handlers cannot block. */
    if (in_irq() || in_serving_softirq()) {
        local_irq_restore(flags);
        return;
    }

    /* Clean up if blocked during RCU read-side critical section. */
    if (special & RCU_READ_UNLOCK_BLOCKED) {
        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

        /*
         * Remove this task from the list it blocked on. The
         * task can migrate while we acquire the lock, but at
         * most one time. So at most two passes through loop.
         */
        for (;;) {
            rnp = t->rcu_blocked_node;
            raw_spin_lock(&rnp->lock); /* irqs already disabled. */
            if (rnp == t->rcu_blocked_node)
                break;
            raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }
        empty = !rcu_preempt_blocked_readers_cgp(rnp);
        empty_exp = !rcu_preempted_readers_exp(rnp);
        smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
        np = rcu_next_node_entry(t, rnp);
        list_del_init(&t->rcu_node_entry);
        t->rcu_blocked_node = NULL;
        trace_rcu_unlock_preempted_task("rcu_preempt",
                        rnp->gpnum, t->pid);
        if (&t->rcu_node_entry == rnp->gp_tasks)
            rnp->gp_tasks = np;
        if (&t->rcu_node_entry == rnp->exp_tasks)
            rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
        if (&t->rcu_node_entry == rnp->boost_tasks)
            rnp->boost_tasks = np;
        /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
        if (t->rcu_boost_mutex) {
            rbmp = t->rcu_boost_mutex;
            t->rcu_boost_mutex = NULL;
        }
#endif /* #ifdef CONFIG_RCU_BOOST */

        /*
         * If this was the last task on the current list, and if
         * we aren't waiting on any CPUs, report the quiescent state.
         * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
         * so we must take a snapshot of the expedited state.
         */
        empty_exp_now = !rcu_preempted_readers_exp(rnp);
        if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
            trace_rcu_quiescent_state_report("preempt_rcu",
                             rnp->gpnum,
                             0, rnp->qsmask,
                             rnp->level,
                             rnp->grplo,
                             rnp->grphi,
                             !!rnp->gp_tasks);
            rcu_report_unblock_qs_rnp(rnp, flags);
        } else
            raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
        /* Unboost if we were boosted. */
        if (rbmp)
            rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

        /*
         * If this was the last task on the expedited lists,
         * then we need to report up the rcu_node hierarchy.
         */
        if (!empty_exp && empty_exp_now)
            rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
    } else {
        local_irq_restore(flags);
    }
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
    struct task_struct *t = current;

    if (t->rcu_read_lock_nesting != 1)
        --t->rcu_read_lock_nesting;
    else {
        barrier(); /* critical section before exit code. */
        t->rcu_read_lock_nesting = INT_MIN;
        barrier(); /* assign before ->rcu_read_unlock_special load */
        if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
            rcu_read_unlock_special(t);
        barrier(); /* ->rcu_read_unlock_special load before assign */
        t->rcu_read_lock_nesting = 0;
    }
#ifdef CONFIG_PROVE_LOCKING
    {
        int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

        WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
    }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
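
/*
 * Editor's illustrative sketch (not part of the original file): a
 * typical reader pairing rcu_read_lock()/rcu_read_unlock() (which wrap
 * __rcu_read_lock()/__rcu_read_unlock() above) with rcu_dereference().
 * The structure, global pointer, and function are hypothetical; the
 * updater sketch after synchronize_rcu() below reuses them.
 */
struct example_gadget {
    int a;
};
static struct example_gadget __rcu *example_gadget_ptr;

static int example_reader(void)
{
    struct example_gadget *p;
    int a = -1;

    rcu_read_lock();    /* increments ->rcu_read_lock_nesting */
    p = rcu_dereference(example_gadget_ptr);
    if (p)
        a = p->a;       /* p remains valid until rcu_read_unlock() */
    rcu_read_unlock();  /* may invoke rcu_read_unlock_special() */
    return a;
}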

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
    unsigned long flags;
    struct task_struct *t;

    if (!rcu_preempt_blocked_readers_cgp(rnp))
        return;
    raw_spin_lock_irqsave(&rnp->lock, flags);
    t = list_entry(rnp->gp_tasks,
               struct task_struct, rcu_node_entry);
    list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
        sched_show_task(t);
    raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
    struct rcu_node *rnp = rcu_get_root(rsp);

    rcu_print_detail_task_stall_rnp(rnp);
    rcu_for_each_leaf_node(rsp, rnp)
        rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
    printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
           rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
    printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
    struct task_struct *t;
    int ndetected = 0;

    if (!rcu_preempt_blocked_readers_cgp(rnp))
        return 0;
    rcu_print_task_stall_begin(rnp);
    t = list_entry(rnp->gp_tasks,
               struct task_struct, rcu_node_entry);
    list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
        printk(KERN_CONT " P%d", t->pid);
        ndetected++;
    }
    rcu_print_task_stall_end();
    return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
    rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
    if (!list_empty(&rnp->blkd_tasks))
        rnp->gp_tasks = rnp->blkd_tasks.next;
    WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline. Move them up to the root
 * rcu_node. The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns a non-zero value if there were tasks blocking the current
 * RCU grace period (normal and/or expedited) on the specified
 * rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                     struct rcu_node *rnp,
                     struct rcu_data *rdp)
{
    struct list_head *lp;
    struct list_head *lp_root;
    int retval = 0;
    struct rcu_node *rnp_root = rcu_get_root(rsp);
    struct task_struct *t;

    if (rnp == rnp_root) {
        WARN_ONCE(1, "Last CPU thought to be offlined?");
        return 0; /* Shouldn't happen: at least one CPU online. */
    }

    /* If we are on an internal node, complain bitterly. */
    WARN_ON_ONCE(rnp != rdp->mynode);

    /*
     * Move tasks up to root rcu_node. Don't try to get fancy for
     * this corner-case operation -- just put this node's tasks
     * at the head of the root node's list, and update the root node's
     * ->gp_tasks and ->exp_tasks pointers to those of this node's,
     * if non-NULL. This might result in waiting for more tasks than
     * absolutely necessary, but this is a good performance/complexity
     * tradeoff.
     */
    if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
        retval |= RCU_OFL_TASKS_NORM_GP;
    if (rcu_preempted_readers_exp(rnp))
        retval |= RCU_OFL_TASKS_EXP_GP;
    lp = &rnp->blkd_tasks;
    lp_root = &rnp_root->blkd_tasks;
    while (!list_empty(lp)) {
        t = list_entry(lp->next, typeof(*t), rcu_node_entry);
        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
        list_del(&t->rcu_node_entry);
        t->rcu_blocked_node = rnp_root;
        list_add(&t->rcu_node_entry, lp_root);
        if (&t->rcu_node_entry == rnp->gp_tasks)
            rnp_root->gp_tasks = rnp->gp_tasks;
        if (&t->rcu_node_entry == rnp->exp_tasks)
            rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
        if (&t->rcu_node_entry == rnp->boost_tasks)
            rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
    }

#ifdef CONFIG_RCU_BOOST
    /* In case root is being boosted and leaf is not. */
    raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
    if (rnp_root->boost_tasks != NULL &&
        rnp_root->boost_tasks != rnp_root->gp_tasks)
        rnp_root->boost_tasks = rnp_root->gp_tasks;
    raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

    rnp->gp_tasks = NULL;
    rnp->exp_tasks = NULL;
    return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
    rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}

/*
 * Check for a quiescent state from the current CPU. When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
    struct task_struct *t = current;

    if (t->rcu_read_lock_nesting == 0) {
        rcu_preempt_qs(cpu);
        return;
    }
    if (t->rcu_read_lock_nesting > 0 &&
        per_cpu(rcu_preempt_data, cpu).qs_pending)
        t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
    __rcu_process_callbacks(&rcu_preempt_state,
                &__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
    rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
    __call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
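
/*
 * Editor's illustrative sketch (not part of the original file):
 * deferred freeing via call_rcu(). The callback runs only after a full
 * grace period, when no pre-existing reader can still hold a reference.
 * All names are hypothetical; kfree() is assumed to be in scope via
 * <linux/slab.h>.
 */
struct example_node {
    int key;
    struct rcu_head rh;
};

static void example_free_cb(struct rcu_head *rcu)
{
    kfree(container_of(rcu, struct example_node, rh));
}

/* Caller has already unlinked @p from every RCU-visible structure. */
static void example_defer_free(struct example_node *p)
{
    call_rcu(&p->rh, example_free_cb);
}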

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
            void (*func)(struct rcu_head *rcu))
{
    __call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
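
/*
 * Editor's illustrative sketch (not part of the original file): the
 * usual route into kfree_call_rcu() is the kfree_rcu() macro, which
 * encodes the offset of the rcu_head within the enclosing structure.
 * struct example_item is hypothetical.
 */
struct example_item {
    int data;
    struct rcu_head rh;
};

/* Caller has already unlinked @p; free it after a grace period. */
static void example_kfree_rcu(struct example_item *p)
{
    kfree_rcu(p, rh); /* expands to __kfree_rcu() -> kfree_call_rcu() */
}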

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting. RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
    rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
               !lock_is_held(&rcu_lock_map) &&
               !lock_is_held(&rcu_sched_lock_map),
               "Illegal synchronize_rcu() in RCU read-side critical section");
    if (!rcu_scheduler_active)
        return;
    wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
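
/*
 * Editor's illustrative sketch (not part of the original file): the
 * canonical publish-then-wait updater. It reuses the hypothetical
 * example_gadget/example_gadget_ptr from the reader sketch above;
 * example_lock serializes updaters. Note that synchronize_rcu()
 * blocks, so it is called only after the spinlock is dropped.
 */
static DEFINE_SPINLOCK(example_lock);

static void example_update(struct example_gadget *newp)
{
    struct example_gadget *oldp;

    spin_lock(&example_lock);
    oldp = rcu_dereference_protected(example_gadget_ptr,
                     lockdep_is_held(&example_lock));
    rcu_assign_pointer(example_gadget_ptr, newp); /* publish */
    spin_unlock(&example_lock);

    synchronize_rcu(); /* wait for readers that might still see oldp */
    kfree(oldp);       /* assumes <linux/slab.h> */
}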

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
    return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period. Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
    return !rcu_preempted_readers_exp(rnp) &&
           ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period. This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree. (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                   bool wake)
{
    unsigned long flags;
    unsigned long mask;

    raw_spin_lock_irqsave(&rnp->lock, flags);
    for (;;) {
        if (!sync_rcu_preempt_exp_done(rnp)) {
            raw_spin_unlock_irqrestore(&rnp->lock, flags);
            break;
        }
        if (rnp->parent == NULL) {
            raw_spin_unlock_irqrestore(&rnp->lock, flags);
            if (wake)
                wake_up(&sync_rcu_preempt_exp_wq);
            break;
        }
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
        rnp = rnp->parent;
        raw_spin_lock(&rnp->lock); /* irqs already disabled */
        rnp->expmask &= ~mask;
    }
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure. If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
    unsigned long flags;
    int must_wait = 0;

    raw_spin_lock_irqsave(&rnp->lock, flags);
    if (list_empty(&rnp->blkd_tasks))
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
    else {
        rnp->exp_tasks = rnp->blkd_tasks.next;
        rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
        must_wait = 1;
    }
    if (!must_wait)
        rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it. The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for these lists to drain. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier. Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
    unsigned long flags;
    struct rcu_node *rnp;
    struct rcu_state *rsp = &rcu_preempt_state;
    long snap;
    int trycount = 0;

    smp_mb(); /* Caller's modifications seen first by other CPUs. */
    snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
    smp_mb(); /* Above access cannot bleed into critical section. */

    /*
     * Acquire lock, falling back to synchronize_rcu() if too many
     * lock-acquisition failures. Of course, if someone does the
     * expedited grace period for us, just leave.
     */
    while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
        if (trycount++ < 10)
            udelay(trycount * num_online_cpus());
        else {
            synchronize_rcu();
            return;
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
            goto mb_ret; /* Others did our work for us. */
    }
    if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
        goto unlock_mb_ret; /* Others did our work for us. */

    /* Force all RCU readers onto ->blkd_tasks lists. */
    synchronize_sched_expedited();

    raw_spin_lock_irqsave(&rsp->onofflock, flags);

    /* Initialize ->expmask for all non-leaf rcu_node structures. */
    rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
        raw_spin_lock(&rnp->lock); /* irqs already disabled. */
        rnp->expmask = rnp->qsmaskinit;
        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
    }

    /* Snapshot current state of ->blkd_tasks lists. */
    rcu_for_each_leaf_node(rsp, rnp)
        sync_rcu_preempt_exp_init(rsp, rnp);
    if (NUM_RCU_NODES > 1)
        sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

    raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

    /* Wait for snapshotted ->blkd_tasks lists to drain. */
    rnp = rcu_get_root(rsp);
    wait_event(sync_rcu_preempt_exp_wq,
           sync_rcu_preempt_exp_done(rnp));

    /* Clean up and exit. */
    smp_mb(); /* ensure expedited GP seen before counter increment. */
    ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
    mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
    smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
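
/*
 * Editor's illustrative sketch (not part of the original file) of the
 * batching advice in the header comment above: instead of paying for
 * one expedited grace period per update, apply all updates first and
 * wait once. example_retire_one() is a hypothetical per-element
 * update; the batched form assumes correctness permits deferring the
 * grace period until after the whole batch.
 */
static void example_retire_one(int i)
{
    /* hypothetical: unlink element i from an RCU-protected structure */
}

static void example_retire_batch(int n)
{
    int i;

    /*
     * Anti-pattern (what the comment above warns against):
     *
     *     for (i = 0; i < n; i++) {
     *         example_retire_one(i);
     *         synchronize_rcu_expedited();
     *     }
     */

    /* Preferred: batch the updates, then wait for one grace period. */
    for (i = 0; i < n; i++)
        example_retire_one(i);
    synchronize_rcu();
}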

/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
    return __rcu_pending(&rcu_preempt_state,
                 &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU have callbacks on this CPU?
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
    return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
    _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
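
/*
 * Editor's illustrative sketch (not part of the original file): the
 * classic rcu_barrier() caller is module-unload code, which must make
 * sure every callback queued by the module has finished running before
 * the callback text itself can vanish. example_stop_queueing() is a
 * hypothetical stand-in for whatever prevents further call_rcu() calls.
 */
static void example_stop_queueing(void)
{
    /* hypothetical: unregister hooks so no new callbacks get queued */
}

static void example_module_exit(void)
{
    example_stop_queueing();
    rcu_barrier(); /* wait for all previously queued callbacks */
    /* Now it is safe to free module state and unload. */
}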

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
    rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU
 * and record a quiescent state.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
    rcu_cleanup_dying_cpu(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
    rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
    printk(KERN_INFO "Hierarchical RCU implementation.\n");
    rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
    return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
    rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
    return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
    raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
    return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
    WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                     struct rcu_node *rnp,
                     struct rcu_data *rdp)
{
    return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
            void (*func)(struct rcu_head *rcu))
{
    __call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
    synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                   bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
    return 0;
}

/*
 * Because preemptible RCU does not exist, it never has callbacks.
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
    return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
    rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there is no cleanup to do.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
    if (list_empty(&rnp->blkd_tasks))
        rnp->n_balk_blkd_tasks++;
    else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
        rnp->n_balk_exp_gp_tasks++;
    else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
        rnp->n_balk_boost_tasks++;
    else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
        rnp->n_balk_notblocked++;
    else if (rnp->gp_tasks != NULL &&
         ULONG_CMP_LT(jiffies, rnp->boost_time))
        rnp->n_balk_notyet++;
    else
        rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
    unsigned long flags;
    struct rt_mutex mtx;
    struct task_struct *t;
    struct list_head *tb;

    if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
        return 0; /* Nothing left to boost. */

    raw_spin_lock_irqsave(&rnp->lock, flags);

    /*
     * Recheck under the lock: all tasks in need of boosting
     * might exit their RCU read-side critical sections on their own.
     */
    if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        return 0;
    }

    /*
     * Preferentially boost tasks blocking expedited grace periods.
     * This cannot starve the normal grace periods because a second
     * expedited grace period must boost all blocked tasks, including
     * those blocking the pre-existing normal grace period.
     */
    if (rnp->exp_tasks != NULL) {
        tb = rnp->exp_tasks;
        rnp->n_exp_boosts++;
    } else {
        tb = rnp->boost_tasks;
        rnp->n_normal_boosts++;
    }
    rnp->n_tasks_boosted++;

    /*
     * We boost task t by manufacturing an rt_mutex that appears to
     * be held by task t. We leave a pointer to that rt_mutex where
     * task t can find it, and task t will release the mutex when it
     * exits its outermost RCU read-side critical section. Then
     * simply acquiring this artificial rt_mutex will boost task
     * t's priority. (Thanks to tglx for suggesting this approach!)
     *
     * Note that task t must acquire rnp->lock to remove itself from
     * the ->blkd_tasks list, which it will do from exit() if from
     * nowhere else. We therefore are guaranteed that task t will
     * stay around at least until we drop rnp->lock. Note that
     * rnp->lock also resolves races between our priority boosting
     * and task t's exiting its outermost RCU read-side critical
     * section.
     */
    t = container_of(tb, struct task_struct, rcu_node_entry);
    rt_mutex_init_proxy_locked(&mtx, t);
    t->rcu_boost_mutex = &mtx;
    raw_spin_unlock_irqrestore(&rnp->lock, flags);
    rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
    rt_mutex_unlock(&mtx); /* Keep lockdep happy. */

    return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
           ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost. We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
    invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread. One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
    struct rcu_node *rnp = (struct rcu_node *)arg;
    int spincnt = 0;
    int more2boost;

    trace_rcu_utilization("Start boost kthread@init");
    for (;;) {
        rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
        trace_rcu_utilization("End boost kthread@rcu_wait");
        rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
        trace_rcu_utilization("Start boost kthread@rcu_wait");
        rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
        more2boost = rcu_boost(rnp);
        if (more2boost)
            spincnt++;
        else
            spincnt = 0;
        if (spincnt > 10) {
            trace_rcu_utilization("End boost kthread@rcu_yield");
            rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
            trace_rcu_utilization("Start boost kthread@rcu_yield");
            spincnt = 0;
        }
    }
    /* NOTREACHED */
    trace_rcu_utilization("End boost kthread@notreached");
    return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled. The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
    struct task_struct *t;

    if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
        rnp->n_balk_exp_gp_tasks++;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        return;
    }
    if (rnp->exp_tasks != NULL ||
        (rnp->gp_tasks != NULL &&
         rnp->boost_tasks == NULL &&
         rnp->qsmask == 0 &&
         ULONG_CMP_GE(jiffies, rnp->boost_time))) {
        if (rnp->exp_tasks == NULL)
            rnp->boost_tasks = rnp->gp_tasks;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        t = rnp->boost_kthread_task;
        if (t != NULL)
            wake_up_process(t);
    } else {
        rcu_initiate_boost_trace(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
    }
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
    unsigned long flags;

    local_irq_save(flags);
    __this_cpu_write(rcu_cpu_has_work, 1);
    if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
        current != __this_cpu_read(rcu_cpu_kthread_task))
        wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
    local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
    return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread. The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                      cpumask_var_t cm)
{
    struct task_struct *t;

    t = rnp->boost_kthread_task;
    if (t != NULL)
        set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
    rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                         struct rcu_node *rnp,
                         int rnp_index)
{
    unsigned long flags;
    struct sched_param sp;
    struct task_struct *t;

    if (&rcu_preempt_state != rsp)
        return 0;
    rsp->boost = 1;
    if (rnp->boost_kthread_task != NULL)
        return 0;
    t = kthread_create(rcu_boost_kthread, (void *)rnp,
               "rcub/%d", rnp_index);
    if (IS_ERR(t))
        return PTR_ERR(t);
    raw_spin_lock_irqsave(&rnp->lock, flags);
    rnp->boost_kthread_task = t;
    raw_spin_unlock_irqrestore(&rnp->lock, flags);
    sp.sched_priority = RCU_BOOST_PRIO;
    sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
    wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
    return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
    struct task_struct *t;

    /* Stop the CPU's kthread. */
    t = per_cpu(rcu_cpu_kthread_task, cpu);
    if (t != NULL) {
        per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
        kthread_stop(t);
    }
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
    rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
    rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
    rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
    struct task_struct *t;

    t = rnp->node_kthread_task;
    if (t != NULL)
        wake_up_process(t);
}

/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument. The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
    int policy;
    struct sched_param sp;
    struct task_struct *t;

    t = per_cpu(rcu_cpu_kthread_task, cpu);
    if (t == NULL)
        return;
    if (to_rt) {
        policy = SCHED_FIFO;
        sp.sched_priority = RCU_KTHREAD_PRIO;
    } else {
        policy = SCHED_NORMAL;
        sp.sched_priority = 0;
    }
    sched_setscheduler_nocheck(t, policy, &sp);
}

/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
    struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
    struct rcu_node *rnp = rdp->mynode;

    atomic_or(rdp->grpmask, &rnp->wakemask);
    invoke_rcu_node_kthread(rnp);
}

/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted. Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
    struct sched_param sp;
    struct timer_list yield_timer;
    int prio = current->rt_priority;

    setup_timer_on_stack(&yield_timer, f, arg);
    mod_timer(&yield_timer, jiffies + 2);
    sp.sched_priority = 0;
    sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
    set_user_nice(current, 19);
    schedule();
    set_user_nice(current, 0);
    sp.sched_priority = prio;
    sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
    del_timer(&yield_timer);
}

/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline. We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh. This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
    while (cpu_is_offline(cpu) ||
           !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
           smp_processor_id() != cpu) {
        if (kthread_should_stop())
            return 1;
        per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
        per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
        local_bh_enable();
        schedule_timeout_uninterruptible(1);
        if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
            set_cpus_allowed_ptr(current, cpumask_of(cpu));
        local_bh_disable();
    }
    per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
    return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
    int cpu = (int)(long)arg;
    unsigned long flags;
    int spincnt = 0;
    unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
    char work;
    char *workp = &per_cpu(rcu_cpu_has_work, cpu);

    trace_rcu_utilization("Start CPU kthread@init");
    for (;;) {
        *statusp = RCU_KTHREAD_WAITING;
        trace_rcu_utilization("End CPU kthread@rcu_wait");
        rcu_wait(*workp != 0 || kthread_should_stop());
        trace_rcu_utilization("Start CPU kthread@rcu_wait");
        local_bh_disable();
        if (rcu_cpu_kthread_should_stop(cpu)) {
            local_bh_enable();
            break;
        }
        *statusp = RCU_KTHREAD_RUNNING;
        per_cpu(rcu_cpu_kthread_loops, cpu)++;
        local_irq_save(flags);
        work = *workp;
        *workp = 0;
        local_irq_restore(flags);
        if (work)
            rcu_kthread_do_work();
        local_bh_enable();
        if (*workp != 0)
            spincnt++;
        else
            spincnt = 0;
        if (spincnt > 10) {
            *statusp = RCU_KTHREAD_YIELDING;
            trace_rcu_utilization("End CPU kthread@rcu_yield");
            rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
            trace_rcu_utilization("Start CPU kthread@rcu_yield");
            spincnt = 0;
        }
    }
    *statusp = RCU_KTHREAD_STOPPED;
    trace_rcu_utilization("End CPU kthread@term");
    return 0;
}

1645/*
1646 * Spawn a per-CPU kthread, setting up affinity and priority.
1647 * Because the CPU hotplug lock is held, no other CPU will be attempting
1648 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1649 * attempting to access it during boot, but the locking in kthread_bind()
1650 * will enforce sufficient ordering.
1651 *
1652 * Please note that we cannot simply refuse to wake up the per-CPU
1653 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1654 * which can result in softlockup complaints if the task ends up being
1655 * idle for more than a couple of minutes.
1656 *
1657 * However, please note also that we cannot bind the per-CPU kthread to its
1658 * CPU until that CPU is fully online. We also cannot wait until the
1659 * CPU is fully online before we create its per-CPU kthread, as this would
1660 * deadlock the system when CPU notifiers tried waiting for grace
1661 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1662 * is online. If its CPU is not yet fully online, then the code in
1663 * rcu_cpu_kthread() will wait until it is fully online, and then do
1664 * the binding.
1665 */
1666static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1667{
1668    struct sched_param sp;
1669    struct task_struct *t;
1670
1671    if (!rcu_scheduler_fully_active ||
1672        per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1673        return 0;
1674    t = kthread_create_on_node(rcu_cpu_kthread,
1675                   (void *)(long)cpu,
1676                   cpu_to_node(cpu),
1677                   "rcuc/%d", cpu);
1678    if (IS_ERR(t))
1679        return PTR_ERR(t);
1680    if (cpu_online(cpu))
1681        kthread_bind(t, cpu);
1682    per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1683    WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1684    sp.sched_priority = RCU_KTHREAD_PRIO;
1685    sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1686    per_cpu(rcu_cpu_kthread_task, cpu) = t;
1687    wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1688    return 0;
1689}
1690
1691/*
1692 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1693 * kthreads when needed. We ignore requests to wake up kthreads
1694 * for offline CPUs, which is OK because force_quiescent_state()
1695 * takes care of this case.
1696 */
1697static int rcu_node_kthread(void *arg)
1698{
1699    int cpu;
1700    unsigned long flags;
1701    unsigned long mask;
1702    struct rcu_node *rnp = (struct rcu_node *)arg;
1703    struct sched_param sp;
1704    struct task_struct *t;
1705
1706    for (;;) {
1707        rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1708        rcu_wait(atomic_read(&rnp->wakemask) != 0);
1709        rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1710        raw_spin_lock_irqsave(&rnp->lock, flags);
1711        mask = atomic_xchg(&rnp->wakemask, 0);
1712        rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
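        /*
         * Bit N of the ->wakemask snapshot corresponds to CPU
         * rnp->grplo + N; walk the group and poke the per-CPU
         * kthread of each CPU whose bit is set.
         */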
1713        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1714            if ((mask & 0x1) == 0)
1715                continue;
1716            preempt_disable();
1717            t = per_cpu(rcu_cpu_kthread_task, cpu);
1718            if (!cpu_online(cpu) || t == NULL) {
1719                preempt_enable();
1720                continue;
1721            }
1722            per_cpu(rcu_cpu_has_work, cpu) = 1;
1723            sp.sched_priority = RCU_KTHREAD_PRIO;
1724            sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1725            preempt_enable();
1726        }
1727    }
1728    /* NOTREACHED */
1729    rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1730    return 0;
1731}
1732
1733/*
1734 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1735 * served by the rcu_node in question. The CPU hotplug lock is still
1736 * held, so the value of rnp->qsmaskinit will be stable.
1737 *
1738 * We don't include outgoingcpu in the affinity set; callers pass -1 if there is
1739 * no outgoing CPU. If there are no CPUs left in the affinity set,
1740 * this function allows the kthread to execute on any CPU.
1741 */
1742static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1743{
1744    cpumask_var_t cm;
1745    int cpu;
1746    unsigned long mask = rnp->qsmaskinit;
1747
1748    if (rnp->node_kthread_task == NULL)
1749        return;
1750    if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1751        return;
1752    cpumask_clear(cm);
1753    for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1754        if ((mask & 0x1) && cpu != outgoingcpu)
1755            cpumask_set_cpu(cpu, cm);
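    /*
     * If no CPUs remain (e.g., the outgoing CPU was the group's last),
     * let the kthread run anywhere except this rcu_node's own CPUs.
     */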
1756    if (cpumask_weight(cm) == 0) {
1757        cpumask_setall(cm);
1758        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1759            cpumask_clear_cpu(cpu, cm);
1760        WARN_ON_ONCE(cpumask_weight(cm) == 0);
1761    }
1762    set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1763    rcu_boost_kthread_setaffinity(rnp, cm);
1764    free_cpumask_var(cm);
1765}
1766
1767/*
1768 * Spawn a per-rcu_node kthread, setting priority and affinity.
1769 * Called during boot before online/offline can happen, or, if
1770 * during runtime, with the main CPU-hotplug locks held. So only
1771 * one of these can be executing at a time.
1772 */
1773static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1774                        struct rcu_node *rnp)
1775{
1776    unsigned long flags;
1777    int rnp_index = rnp - &rsp->node[0];
1778    struct sched_param sp;
1779    struct task_struct *t;
1780
1781    if (!rcu_scheduler_fully_active ||
1782        rnp->qsmaskinit == 0)
1783        return 0;
1784    if (rnp->node_kthread_task == NULL) {
1785        t = kthread_create(rcu_node_kthread, (void *)rnp,
1786                   "rcun/%d", rnp_index);
1787        if (IS_ERR(t))
1788            return PTR_ERR(t);
1789        raw_spin_lock_irqsave(&rnp->lock, flags);
1790        rnp->node_kthread_task = t;
1791        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1792        sp.sched_priority = 99;
1793        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1794        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1795    }
1796    return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1797}
1798
1799/*
1800 * Spawn all kthreads -- called as soon as the scheduler is running.
1801 */
1802static int __init rcu_spawn_kthreads(void)
1803{
1804    int cpu;
1805    struct rcu_node *rnp;
1806
1807    rcu_scheduler_fully_active = 1;
1808    for_each_possible_cpu(cpu) {
1809        per_cpu(rcu_cpu_has_work, cpu) = 0;
1810        if (cpu_online(cpu))
1811            (void)rcu_spawn_one_cpu_kthread(cpu);
1812    }
1813    rnp = rcu_get_root(rcu_state);
1814    (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1815    if (NUM_RCU_NODES > 1) {
1816        rcu_for_each_leaf_node(rcu_state, rnp)
1817            (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1818    }
1819    return 0;
1820}
1821early_initcall(rcu_spawn_kthreads);
1822
1823static void __cpuinit rcu_prepare_kthreads(int cpu)
1824{
1825    struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1826    struct rcu_node *rnp = rdp->mynode;
1827
1828    /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1829    if (rcu_scheduler_fully_active) {
1830        (void)rcu_spawn_one_cpu_kthread(cpu);
1831        if (rnp->node_kthread_task == NULL)
1832            (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1833    }
1834}
1835
1836#else /* #ifdef CONFIG_RCU_BOOST */
1837
1838static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1839{
1840    raw_spin_unlock_irqrestore(&rnp->lock, flags);
1841}
1842
1843static void invoke_rcu_callbacks_kthread(void)
1844{
1845    WARN_ON_ONCE(1);
1846}
1847
1848static bool rcu_is_callbacks_kthread(void)
1849{
1850    return false;
1851}
1852
1853static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1854{
1855}
1856
1857#ifdef CONFIG_HOTPLUG_CPU
1858
1859static void rcu_stop_cpu_kthread(int cpu)
1860{
1861}
1862
1863#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1864
1865static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1866{
1867}
1868
1869static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1870{
1871}
1872
1873static int __init rcu_scheduler_really_started(void)
1874{
1875    rcu_scheduler_fully_active = 1;
1876    return 0;
1877}
1878early_initcall(rcu_scheduler_really_started);
1879
1880static void __cpuinit rcu_prepare_kthreads(int cpu)
1881{
1882}
1883
1884#endif /* #else #ifdef CONFIG_RCU_BOOST */
1885
1886#if !defined(CONFIG_RCU_FAST_NO_HZ)
1887
1888/*
1889 * Check to see if any future RCU-related work will need to be done
1890 * by the current CPU, even if none need be done immediately, returning
1891 * 1 if so. This function is part of the RCU implementation; it is -not-
1892 * an exported member of the RCU API.
1893 *
1894 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1895 * any flavor of RCU.
1896 */
1897int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1898{
1899    *delta_jiffies = ULONG_MAX;
1900    return rcu_cpu_has_callbacks(cpu);
1901}
1902
1903/*
1904 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
1905 */
1906static void rcu_prepare_for_idle_init(int cpu)
1907{
1908}
1909
1910/*
1911 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1912 * after it.
1913 */
1914static void rcu_cleanup_after_idle(int cpu)
1915{
1916}
1917
1918/*
1919 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1920 * is nothing.
1921 */
1922static void rcu_prepare_for_idle(int cpu)
1923{
1924}
1925
1926/*
1927 * Don't bother keeping a running count of the number of RCU callbacks
1928 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1929 */
1930static void rcu_idle_count_callbacks_posted(void)
1931{
1932}
1933
1934#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1935
1936/*
1937 * This code is invoked when a CPU goes idle, at which point we want
1938 * to have the CPU do everything required for RCU so that it can enter
1939 * the energy-efficient dyntick-idle mode. This is handled by a
1940 * state machine implemented by rcu_prepare_for_idle() below.
1941 *
1942 * The following four preprocessor symbols control this state machine:
1943 *
1944 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
1945 * to satisfy RCU. Beyond this point, it is better to incur a periodic
1946 * scheduling-clock interrupt than to loop through the state machine
1947 * at full power.
1948 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
1949 * optional if RCU does not need anything immediately from this
1950 * CPU, even if this CPU still has RCU callbacks queued. The first
1951 * few times through the state machine are mandatory: we need to give
1952 * the state machine a chance to communicate a quiescent state
1953 * to the RCU core.
1954 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1955 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1956 * is sized to be roughly one RCU grace period. Those energy-efficiency
1957 * benchmarkers who might otherwise be tempted to set this to a large
1958 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1959 * system. And if you are -that- concerned about energy efficiency,
1960 * just power the system down and be done with it!
1961 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1962 * permitted to sleep in dyntick-idle mode with only lazy RCU
1963 * callbacks pending. Setting this too high can OOM your system.
1964 *
1965 * The values below work well in practice. If future workloads require
1966 * adjustment, they can be converted into kernel config parameters, though
1967 * making the state machine smarter might be a better option.
1968 */
1969#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
1970#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
1971#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
1972#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
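
/*
 * Worked example with the values above: ->dyntick_drain is initialized
 * to 5 and counted down on subsequent passes; once it has fallen to
 * RCU_IDLE_OPT_FLUSHES (3), the CPU may enter dyntick-idle despite
 * queued callbacks, provided RCU needs nothing from it immediately.
 * A CPU with non-lazy callbacks is then awakened after at most 6
 * jiffies, one with only lazy callbacks after roughly six seconds.
 */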
1973
1974/*
1975 * Does the specified flavor of RCU have non-lazy callbacks pending on
1976 * the specified CPU? Both RCU flavor and CPU are specified by the
1977 * rcu_data structure.
1978 */
1979static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
1980{
1981    return rdp->qlen != rdp->qlen_lazy;
1982}
1983
1984#ifdef CONFIG_TREE_PREEMPT_RCU
1985
1986/*
1987 * Are there non-lazy RCU-preempt callbacks? (There cannot be if there
1988 * is no RCU-preempt in the kernel.)
1989 */
1990static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1991{
1992    struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
1993
1994    return __rcu_cpu_has_nonlazy_callbacks(rdp);
1995}
1996
1997#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1998
1999static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
2000{
2001    return false;
2002}
2003
2004#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
2005
2006/*
2007 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
2008 */
2009static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
2010{
2011    return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
2012           __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
2013           rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
2014}
2015
2016/*
2017 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
2018 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
2019 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
2020 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
2021 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
2022 * it is better to incur scheduling-clock interrupts than to spin
2023 * continuously for the same duration!
2024 *
2025 * The delta_jiffies argument is used to store the time when RCU is
2026 * going to need the CPU again if it still has callbacks. The reason
2027 * for this is that rcu_prepare_for_idle() might need to post a timer,
2028 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
2029 * the wakeup time for this CPU. This means that RCU's timer can be
2030 * delayed until the wakeup time, which defeats the purpose of posting
2031 * a timer.
2032 */
2033int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
2034{
2035    struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2036
2037    /* Flag a new idle sojourn to the idle-entry state machine. */
2038    rdtp->idle_first_pass = 1;
2039    /* If no callbacks, RCU doesn't need the CPU. */
2040    if (!rcu_cpu_has_callbacks(cpu)) {
2041        *delta_jiffies = ULONG_MAX;
2042        return 0;
2043    }
2044    if (rdtp->dyntick_holdoff == jiffies) {
2045        /* RCU recently tried and failed, so don't try again. */
2046        *delta_jiffies = 1;
2047        return 1;
2048    }
2049    /* Set up for the possibility that RCU will post a timer. */
2050    if (rcu_cpu_has_nonlazy_callbacks(cpu))
2051        *delta_jiffies = RCU_IDLE_GP_DELAY;
2052    else
2053        *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
2054    return 0;
2055}
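
/*
 * Sketch of the intended caller pattern (hedged; the actual caller is
 * the NO_HZ idle-entry code):
 *
 *	unsigned long dj;
 *
 *	if (rcu_needs_cpu(cpu, &dj))
 *		... keep the scheduling-clock tick running ...
 *	else
 *		... the tick may be stopped for up to dj jiffies ...
 */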
2056
2057/*
2058 * Handler for smp_call_function_single(). The IPI itself wakes the
2059 * CPU, which is the whole point, so the handler body does only tracing.
2060 */
2061void rcu_idle_demigrate(void *unused)
2062{
2063    trace_rcu_prep_idle("Demigrate");
2064}
2065
2066/*
2067 * Timer handler used to force the CPU to start pushing its remaining RCU
2068 * callbacks in the case where it entered dyntick-idle mode with callbacks
2069 * pending. The handler doesn't really need to do anything because the
2070 * real work is done upon re-entry to idle, or by the next scheduling-clock
2071 * interrupt should idle not be re-entered.
2072 *
2073 * One special case: the timer gets migrated without awakening the CPU
2074 * on which the timer was scheduled. In this case, we must wake up
2075 * that CPU. We do so with smp_call_function_single().
2076 */
2077static void rcu_idle_gp_timer_func(unsigned long cpu_in)
2078{
2079    int cpu = (int)cpu_in;
2080
2081    trace_rcu_prep_idle("Timer");
2082    if (cpu != smp_processor_id())
2083        smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
2084    else
2085        WARN_ON_ONCE(1); /* Getting here can hang the system... */
2086}
2087
2088/*
2089 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
2090 */
2091static void rcu_prepare_for_idle_init(int cpu)
2092{
2093    struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2094
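    /*
     * "jiffies - 1" can never equal the current jiffy, so these
     * initializations read as "not in holdoff" and "no pending expiry".
     */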
2095    rdtp->dyntick_holdoff = jiffies - 1;
2096    setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
2097    rdtp->idle_gp_timer_expires = jiffies - 1;
2098    rdtp->idle_first_pass = 1;
2099}
2100
2101/*
2102 * Clean up for exit from idle. Because we are exiting from idle, there
2103 * is no longer any point to ->idle_gp_timer, so cancel it. This will
2104 * do nothing if this timer is not active, so just cancel it unconditionally.
2105 */
2106static void rcu_cleanup_after_idle(int cpu)
2107{
2108    struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2109
2110    del_timer(&rdtp->idle_gp_timer);
2111    trace_rcu_prep_idle("Cleanup after idle");
2112}
2113
2114/*
2115 * Check to see if any RCU-related work can be done by the current CPU,
2116 * and if so, schedule a softirq to get it done. This function is part
2117 * of the RCU implementation; it is -not- an exported member of the RCU API.
2118 *
2119 * The idea is for the current CPU to clear out all work required by the
2120 * RCU core for the current grace period, so that this CPU can be permitted
2121 * to enter dyntick-idle mode. In some cases, it will need to be awakened
2122 * at the end of the grace period by whatever CPU ends the grace period.
2123 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
2124 * number of wakeups by a modest integer factor.
2125 *
2126 * Because it is not legal to invoke rcu_process_callbacks() with irqs
2127 * disabled, we do one pass of force_quiescent_state(), then do an
2128 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
2129 * later. The ->dyntick_drain field controls the sequencing.
2130 *
2131 * The caller must have disabled interrupts.
2132 */
2133static void rcu_prepare_for_idle(int cpu)
2134{
2135    struct timer_list *tp;
2136    struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2137
2138    /*
2139     * If this is an idle re-entry, for example, due to use of
2140     * RCU_NONIDLE() or the new idle-loop tracing API within the idle
2141     * loop, then don't take any state-machine actions, unless the
2142     * momentary exit from idle queued additional non-lazy callbacks.
2143     * Instead, repost the ->idle_gp_timer if this CPU has callbacks
2144     * pending.
2145     */
2146    if (!rdtp->idle_first_pass &&
2147        (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
2148        if (rcu_cpu_has_callbacks(cpu)) {
2149            tp = &rdtp->idle_gp_timer;
2150            mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2151        }
2152        return;
2153    }
2154    rdtp->idle_first_pass = 0;
2155    rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
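    /*
     * The "- 1" above deliberately makes the snapshot stale: unless it
     * is refreshed when the ->idle_gp_timer is posted below, the next
     * idle re-entry reruns the full state machine rather than merely
     * reposting the timer.
     */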
2156
2157    /*
2158     * If there are no callbacks on this CPU, enter dyntick-idle mode.
2159     * Also reset state to avoid prejudicing later attempts.
2160     */
2161    if (!rcu_cpu_has_callbacks(cpu)) {
2162        rdtp->dyntick_holdoff = jiffies - 1;
2163        rdtp->dyntick_drain = 0;
2164        trace_rcu_prep_idle("No callbacks");
2165        return;
2166    }
2167
2168    /*
2169     * If in holdoff mode, just return. We will presumably have
2170     * refrained from disabling the scheduling-clock tick.
2171     */
2172    if (rdtp->dyntick_holdoff == jiffies) {
2173        trace_rcu_prep_idle("In holdoff");
2174        return;
2175    }
2176
2177    /* Check and update the ->dyntick_drain sequencing. */
2178    if (rdtp->dyntick_drain <= 0) {
2179        /* First time through, initialize the counter. */
2180        rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
2181    } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
2182           !rcu_pending(cpu) &&
2183           !local_softirq_pending()) {
2184        /* Can we go dyntick-idle despite still having callbacks? */
2185        rdtp->dyntick_drain = 0;
2186        rdtp->dyntick_holdoff = jiffies;
2187        if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
2188            trace_rcu_prep_idle("Dyntick with callbacks");
2189            rdtp->idle_gp_timer_expires =
2190                       jiffies + RCU_IDLE_GP_DELAY;
2191        } else {
2192            rdtp->idle_gp_timer_expires =
2193                       jiffies + RCU_IDLE_LAZY_GP_DELAY;
2194            trace_rcu_prep_idle("Dyntick with lazy callbacks");
2195        }
2196        tp = &rdtp->idle_gp_timer;
2197        mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2198        rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
2199        return; /* Nothing more to do immediately. */
2200    } else if (--(rdtp->dyntick_drain) <= 0) {
2201        /* We have hit the limit, so time to give up. */
2202        rdtp->dyntick_holdoff = jiffies;
2203        trace_rcu_prep_idle("Begin holdoff");
2204        invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
2205        return;
2206    }
2207
2208    /*
2209     * Do one step of pushing the remaining RCU callbacks through
2210     * the RCU core state machine.
2211     */
2212#ifdef CONFIG_TREE_PREEMPT_RCU
2213    if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
2214        rcu_preempt_qs(cpu);
2215        force_quiescent_state(&rcu_preempt_state, 0);
2216    }
2217#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2218    if (per_cpu(rcu_sched_data, cpu).nxtlist) {
2219        rcu_sched_qs(cpu);
2220        force_quiescent_state(&rcu_sched_state, 0);
2221    }
2222    if (per_cpu(rcu_bh_data, cpu).nxtlist) {
2223        rcu_bh_qs(cpu);
2224        force_quiescent_state(&rcu_bh_state, 0);
2225    }
2226
2227    /*
2228     * If RCU callbacks are still pending, RCU still needs this CPU.
2229     * So try forcing the callbacks through the grace period.
2230     */
2231    if (rcu_cpu_has_callbacks(cpu)) {
2232        trace_rcu_prep_idle("More callbacks");
2233        invoke_rcu_core();
2234    } else
2235        trace_rcu_prep_idle("Callbacks drained");
2236}
2237
2238/*
2239 * Keep a running count of the number of non-lazy callbacks posted
2240 * on this CPU. This running counter (which is never decremented) allows
2241 * rcu_prepare_for_idle() to detect when something out of the idle loop
2242 * posts a callback, even if an equal number of callbacks are invoked.
2243 * Of course, callbacks should only be posted from within a trace event
2244 * designed to be called from idle or from within RCU_NONIDLE().
2245 */
2246static void rcu_idle_count_callbacks_posted(void)
2247{
2248    __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
2249}
2250
2251#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2252
2253#ifdef CONFIG_RCU_CPU_STALL_INFO
2254
2255#ifdef CONFIG_RCU_FAST_NO_HZ
2256
2257static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2258{
2259    struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2260    struct timer_list *tltp = &rdtp->idle_gp_timer;
2261
2262    sprintf(cp, "drain=%d %c timer=%lu",
2263        rdtp->dyntick_drain,
2264        rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
2265        timer_pending(tltp) ? tltp->expires - jiffies : -1);
2266}
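
/*
 * With the format above, a stalled CPU in holdoff whose idle GP timer
 * fires in five jiffies would contribute (illustrative values):
 *
 *	"drain=0 H timer=5"
 */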
2267
2268#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2269
2270static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2271{
2272}
2273
2274#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2275
2276/* Initiate the stall-info list. */
2277static void print_cpu_stall_info_begin(void)
2278{
2279    printk(KERN_CONT "\n");
2280}
2281
2282/*
2283 * Print out diagnostic information for the specified stalled CPU.
2284 *
2285 * If the specified CPU is aware of the current RCU grace period
2286 * (flavor specified by rsp), then print the number of scheduling
2287 * clock interrupts the CPU has taken during the time that it has
2288 * been aware. Otherwise, print the number of RCU grace periods
2289 * that this CPU is ignorant of, for example, "1" if the CPU was
2290 * aware of the previous grace period.
2291 *
2292 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2293 */
2294static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2295{
2296    char fast_no_hz[72];
2297    struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2298    struct rcu_dynticks *rdtp = rdp->dynticks;
2299    char *ticks_title;
2300    unsigned long ticks_value;
2301
2302    if (rsp->gpnum == rdp->gpnum) {
2303        ticks_title = "ticks this GP";
2304        ticks_value = rdp->ticks_this_gp;
2305    } else {
2306        ticks_title = "GPs behind";
2307        ticks_value = rsp->gpnum - rdp->gpnum;
2308    }
2309    print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2310    printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2311           cpu, ticks_value, ticks_title,
2312           atomic_read(&rdtp->dynticks) & 0xfff,
2313           rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
2314           fast_no_hz);
2315}
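
/*
 * Putting it together, one line of the stall-info list might read
 * (illustrative values only):
 *
 *	3: (15 ticks this GP) idle=001/1/0 drain=0 H timer=5
 */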
2316
2317/* Terminate the stall-info list. */
2318static void print_cpu_stall_info_end(void)
2319{
2320    printk(KERN_ERR "\t");
2321}
2322
2323/* Zero ->ticks_this_gp for all flavors of RCU. */
2324static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2325{
2326    rdp->ticks_this_gp = 0;
2327}
2328
2329/* Increment ->ticks_this_gp for all flavors of RCU. */
2330static void increment_cpu_stall_ticks(void)
2331{
2332    __get_cpu_var(rcu_sched_data).ticks_this_gp++;
2333    __get_cpu_var(rcu_bh_data).ticks_this_gp++;
2334#ifdef CONFIG_TREE_PREEMPT_RCU
2335    __get_cpu_var(rcu_preempt_data).ticks_this_gp++;
2336#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2337}
2338
2339#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2340
2341static void print_cpu_stall_info_begin(void)
2342{
2343    printk(KERN_CONT " {");
2344}
2345
2346static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2347{
2348    printk(KERN_CONT " %d", cpu);
2349}
2350
2351static void print_cpu_stall_info_end(void)
2352{
2353    printk(KERN_CONT "} ");
2354}
2355
2356static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2357{
2358}
2359
2360static void increment_cpu_stall_ticks(void)
2361{
2362}
2363
2364#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
2365
