kernel/rcutree.h

/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this has not been tested, so there is probably some
 * bug somewhere.
 */
#define MAX_RCU_LVLS 4
#define RCU_FANOUT (CONFIG_RCU_FANOUT)
#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT
# define NUM_RCU_LVLS 1
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 (NR_CPUS)
# define NUM_RCU_LVL_2 0
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_SQ
# define NUM_RCU_LVLS 2
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_2 (NR_CPUS)
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_CUBE
# define NUM_RCU_LVLS 3
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_3 NR_CPUS
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_FOURTH
# define NUM_RCU_LVLS 4
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_4 NR_CPUS
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)

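/*
 * Worked example, assuming a hypothetical NR_CPUS of 1000 and
 * CONFIG_RCU_FANOUT of 64: NR_CPUS <= RCU_FANOUT_SQ (4096), so
 * NUM_RCU_LVLS == 2, with one root rcu_node (NUM_RCU_LVL_0) and
 * DIV_ROUND_UP(1000, 64) == 16 leaf rcu_nodes (NUM_RCU_LVL_1).
 * RCU_SUM is then 1 + 16 + 1000 == 1017, giving NUM_RCU_NODES ==
 * 1017 - 1000 == 17 rcu_node structures in all.
 */
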
/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
    int dynticks_nesting;   /* Track nesting level, sort of. */
    int dynticks;           /* Even value for dynticks-idle, else odd. */
    int dynticks_nmi;       /* Even value for either dynticks-idle or */
                            /*  not in nmi handler, else odd.  So this */
                            /*  remains even for nmi from irq handler. */
};

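/*
 * Illustration of the even/odd convention (the real check lives in
 * rcu_implicit_dynticks_qs() in rcutree.c, which also consults
 * ->dynticks_nmi; this sketch is not built):
 */
#if 0
static int example_dynticks_qs(int snap, int curr)
{
    /*
     * If the counter is even now, the CPU is dynticks-idle; if it
     * changed since the grace-period-start snapshot, the CPU passed
     * through idle.  Either way it cannot be in an RCU read-side
     * critical section that began before this grace period.
     */
    return (curr & 0x1) == 0 || curr != snap;
}
#endif
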
/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
    raw_spinlock_t lock;     /* Root rcu_node's lock protects some */
                             /*  rcu_state fields as well as following. */
    unsigned long gpnum;     /* Current grace period for this node. */
                             /*  This will either be equal to or one */
                             /*  behind the root rcu_node's gpnum. */
    unsigned long completed; /* Last GP completed for this node. */
                             /*  This will either be equal to or one */
                             /*  behind the root rcu_node's ->completed. */
    unsigned long qsmask;    /* CPUs or groups that need to switch in */
                             /*  order for current grace period to proceed.*/
                             /*  In leaf rcu_node, each bit corresponds to */
                             /*  an rcu_data structure, otherwise, each */
                             /*  bit corresponds to a child rcu_node */
                             /*  structure. */
    unsigned long expmask;   /* Groups that have ->blocked_tasks[] */
                             /*  elements that need to drain to allow the */
                             /*  current expedited grace period to */
                             /*  complete (only for TREE_PREEMPT_RCU). */
    unsigned long qsmaskinit;
                             /* Per-GP initial value for qsmask & expmask. */
    unsigned long grpmask;   /* Mask to apply to parent qsmask. */
                             /*  Only one bit will be set in this mask. */
    int grplo;               /* lowest-numbered CPU or group here. */
    int grphi;               /* highest-numbered CPU or group here. */
    u8 grpnum;               /* CPU/group number for next level up. */
    u8 level;                /* root is at level 0. */
    struct rcu_node *parent;
    struct list_head blocked_tasks[4];
                             /* Tasks blocked in RCU read-side critsect. */
                             /*  Grace period number (->gpnum) x blocked */
                             /*  by tasks on the (x & 0x1) element of the */
                             /*  blocked_tasks[] array. */
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
    for ((rnp) = &(rsp)->node[0]; \
         (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
    for ((rnp) = &(rsp)->node[0]; \
         (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
    for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
         (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

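/*
 * Hypothetical consumer of the leaf scan above (not part of this file;
 * such a helper would live in rcutree.c, after struct rcu_state below
 * has been defined; not built):
 */
#if 0
static unsigned long example_count_cpus_blocking_gp(struct rcu_state *rsp)
{
    struct rcu_node *rnp;
    unsigned long sum = 0;

    rcu_for_each_leaf_node(rsp, rnp)
        sum += hweight_long(rnp->qsmask); /* leaf bits map to CPUs. */
    return sum;
}
#endif
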
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL 0       /* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL 1       /* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL 3
#define RCU_NEXT_SIZE 4

/* Per-CPU data for read-copy update. */
struct rcu_data {
    /* 1) quiescent-state and grace-period handling: */
    unsigned long completed;  /* Track rsp->completed gp number */
                              /*  in order to detect GP end. */
    unsigned long gpnum;      /* Highest gp number that this CPU */
                              /*  is aware of having started. */
    unsigned long passed_quiesc_completed;
                              /* Value of completed at time of qs. */
    bool passed_quiesc;       /* User-mode/idle loop etc. */
    bool qs_pending;          /* Core waits for quiesc state. */
    bool beenonline;          /* CPU online at least once. */
    bool preemptable;         /* Preemptable RCU? */
    struct rcu_node *mynode;  /* This CPU's leaf of hierarchy */
    unsigned long grpmask;    /* Mask to apply to leaf qsmask. */

    /* 2) batch handling */
    /*
     * If nxtlist is not NULL, it is partitioned as follows.
     * Any of the partitions might be empty, in which case the
     * pointer to that partition will be equal to the pointer for
     * the following partition.  When the list is empty, all of
     * the nxttail elements point to the ->nxtlist pointer itself,
     * which in that case is NULL.
     *
     * [nxtlist, *nxttail[RCU_DONE_TAIL]):
     *     Entries whose batch # <= ->completed
     *     The grace period for these entries has completed, and
     *     the other grace-period-completed entries may be moved
     *     here temporarily in rcu_process_callbacks().
     * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
     *     Entries whose batch # <= ->completed - 1: waiting for current GP
     * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
     *     Entries known to have arrived before current GP ended
     * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
     *     Entries that might have arrived after current GP ended
     *     Note that the value of *nxttail[RCU_NEXT_TAIL] will
     *     always be NULL, as this is the end of the list.
     */
    struct rcu_head *nxtlist;
    struct rcu_head **nxttail[RCU_NEXT_SIZE];
    long qlen;                /* # of queued callbacks */
    long qlen_last_fqs_check;
                              /* qlen at last check for QS forcing */
    unsigned long n_force_qs_snap;
                              /* did other CPU force QS recently? */
    long blimit;              /* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
    /* 3) dynticks interface. */
    struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
    int dynticks_snap;        /* Per-GP tracking for dynticks. */
    int dynticks_nmi_snap;    /* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */

    /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
    unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
    unsigned long offline_fqs; /* Kicked due to being offline. */
    unsigned long resched_ipi; /* Sent a resched IPI. */

    /* 5) __rcu_pending() statistics. */
    unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
    unsigned long n_rp_qs_pending;
    unsigned long n_rp_report_qs;
    unsigned long n_rp_cb_ready;
    unsigned long n_rp_cpu_needs_gp;
    unsigned long n_rp_gp_completed;
    unsigned long n_rp_gp_started;
    unsigned long n_rp_need_fqs;
    unsigned long n_rp_need_nothing;

    int cpu;
};

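/*
 * Sketch of how a new callback joins the segmented list above, modeled
 * on __call_rcu() in rcutree.c (locking, memory barriers, and interrupt
 * disabling omitted; illustration only, not built):
 */
#if 0
static void example_enqueue(struct rcu_data *rdp, struct rcu_head *head)
{
    head->next = NULL;
    *rdp->nxttail[RCU_NEXT_TAIL] = head;       /* Append at the very end, */
    rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /*  advance the tail so it */
                                               /*  again points at NULL,  */
    rdp->qlen++;                               /*  and count the callback. */
}
#endif
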
/* Values for signaled field in struct rcu_state. */
#define RCU_GP_IDLE 0        /* No grace period in progress. */
#define RCU_GP_INIT 1        /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2   /* Need to scan dyntick state. */
#define RCU_FORCE_QS 3       /* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */

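/*
 * Typical progression of ->signaled: rcu_start_gp() moves it from
 * RCU_GP_IDLE to RCU_GP_INIT, then to RCU_SIGNAL_INIT once the
 * rcu_node hierarchy has been initialized; force_quiescent_state()
 * later advances RCU_SAVE_DYNTICK to RCU_FORCE_QS after dyntick
 * state has been snapshotted.  See rcutree.c for the authoritative
 * transitions.
 */
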
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA 0
#endif

#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA)
                                     /* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
                                     /* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY 2        /* Allow other CPUs time */
                                     /*  to take at least one */
                                     /*  scheduling clock irq */
                                     /*  before ratting on them. */

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

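/*
 * These compare modulo 2^BITS_PER_LONG, so they remain correct when a
 * grace-period counter wraps.  For example, ULONG_CMP_LT(ULONG_MAX - 1, 1)
 * is true: (ULONG_MAX - 1) - 1 exceeds ULONG_MAX / 2, so ULONG_MAX - 1 is
 * treated as "before" a counter that has wrapped around to 1, where a
 * plain "<" would say the opposite.
 */
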
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
    struct rcu_node node[NUM_RCU_NODES];  /* Hierarchy. */
    struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
    u32 levelcnt[MAX_RCU_LVLS + 1];       /* # nodes in each level. */
    u8 levelspread[NUM_RCU_LVLS];         /* kids/node in each level. */
    struct rcu_data *rda[NR_CPUS];        /* array of rdp pointers. */

    /* The following fields are guarded by the root rcu_node's lock. */

    u8 signaled ____cacheline_internodealigned_in_smp;
                                  /* Force QS state. */
    u8 fqs_active;                /* force_quiescent_state() */
                                  /*  is running. */
    u8 fqs_need_gp;               /* A CPU was prevented from */
                                  /*  starting a new grace */
                                  /*  period because */
                                  /*  force_quiescent_state() */
                                  /*  was running. */
    unsigned long gpnum;          /* Current gp number. */
    unsigned long completed;      /* # of last completed gp. */

    /* End of fields guarded by root rcu_node's lock. */

    raw_spinlock_t onofflock;     /* exclude on/offline and */
                                  /*  starting new GP.  Also */
                                  /*  protects the following */
                                  /*  orphan_cbs fields. */
    struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */
                                  /*  orphaned by all CPUs in */
                                  /*  a given leaf rcu_node */
                                  /*  going offline. */
    struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
    long orphan_qlen;             /* Number of orphaned cbs. */
    raw_spinlock_t fqslock;       /* Only one task forcing */
                                  /*  quiescent states. */
    unsigned long jiffies_force_qs; /* Time at which to invoke */
                                  /*  force_quiescent_state(). */
    unsigned long n_force_qs;     /* Number of calls to */
                                  /*  force_quiescent_state(). */
    unsigned long n_force_qs_lh;  /* ~Number of calls leaving */
                                  /*  due to lock unavailable. */
    unsigned long n_force_qs_ngp; /* Number of calls leaving */
                                  /*  due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
    unsigned long gp_start;       /* Time at which GP started, */
                                  /*  but in jiffies. */
    unsigned long jiffies_stall;  /* Time at which to check */
                                  /*  for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
    char *name;                   /* Name of structure. */
};

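/*
 * Sketch of how the ->level[] pointers come to reference the dense
 * ->node[] array, modeled on rcu_init_one() in rcutree.c (simplified;
 * illustration only, not built):
 */
#if 0
static void example_init_level_pointers(struct rcu_state *rsp)
{
    int base = 0;
    int i;

    for (i = 0; i < NUM_RCU_LVLS; i++) {
        rsp->level[i] = &rsp->node[base]; /* start of this level, */
        base += rsp->levelcnt[i];         /*  then skip its nodes. */
    }
}
#endif
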
/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
                                  /*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP 0x2  /* Tasks blocking expedited */
                                  /*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_orphanage(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);

#endif /* #ifndef RCU_TREE_NONCORE */

