1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition |
3 | * Internal non-public definitions that provide either classic |
4 | * or preemptible semantics. |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. |
10 | * |
11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | * |
20 | * Copyright (c) 2010 Linaro |
21 | * |
22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
23 | */ |
24 | |
25 | #include <linux/kthread.h> |
26 | #include <linux/module.h> |
27 | #include <linux/debugfs.h> |
28 | #include <linux/seq_file.h> |
29 | |
30 | /* Global control variables for rcupdate callback mechanism. */ |
31 | struct rcu_ctrlblk { |
32 | struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ |
33 | struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ |
34 | struct rcu_head **curtail; /* ->next pointer of last CB. */ |
35 | RCU_TRACE(long qlen); /* Number of pending CBs. */ |
36 | RCU_TRACE(char *name); /* Name of RCU type. */ |
37 | }; |
38 | |
39 | /* Definition for rcupdate control block. */ |
40 | static struct rcu_ctrlblk rcu_sched_ctrlblk = { |
41 | .donetail = &rcu_sched_ctrlblk.rcucblist, |
42 | .curtail = &rcu_sched_ctrlblk.rcucblist, |
43 | RCU_TRACE(.name = "rcu_sched") |
44 | }; |
45 | |
46 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
47 | .donetail = &rcu_bh_ctrlblk.rcucblist, |
48 | .curtail = &rcu_bh_ctrlblk.rcucblist, |
49 | RCU_TRACE(.name = "rcu_bh") |
50 | }; |
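
/*
 * Illustrative sketch (not part of the original file): how the tail
 * pointers in struct rcu_ctrlblk are typically used.  Appending a
 * callback advances ->curtail, and the end of a grace period advances
 * ->donetail, so the sublist from ->rcucblist up to *->donetail holds
 * the callbacks that are ready to invoke.  The helper names below are
 * hypothetical.
 *
 *	static void sketch_enqueue(struct rcu_ctrlblk *rcp,
 *				   struct rcu_head *head)
 *	{
 *		head->next = NULL;
 *		*rcp->curtail = head;		// append after last CB
 *		rcp->curtail = &head->next;	// remember new last ->next
 *	}
 *
 *	static struct rcu_head *sketch_extract_done(struct rcu_ctrlblk *rcp)
 *	{
 *		struct rcu_head *list = rcp->rcucblist;
 *
 *		rcp->rcucblist = *rcp->donetail;	// not-yet-done CBs stay
 *		*rcp->donetail = NULL;
 *		if (rcp->curtail == rcp->donetail)	// list now empty?
 *			rcp->curtail = &rcp->rcucblist;
 *		rcp->donetail = &rcp->rcucblist;
 *		return list;		// callbacks whose GP has elapsed
 *	}
 */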
51 | |
52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
53 | int rcu_scheduler_active __read_mostly; |
54 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
55 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
56 | |
57 | #ifdef CONFIG_TINY_PREEMPT_RCU |
58 | |
59 | #include <linux/delay.h> |
60 | |
61 | /* Global control variables for preemptible RCU. */ |
62 | struct rcu_preempt_ctrlblk { |
63 | struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */ |
64 | struct rcu_head **nexttail; |
65 | /* Tasks blocked in a preemptible RCU */ |
66 | /* read-side critical section while a */ |
67 | /* preemptible-RCU grace period is in */ |
68 | /* progress must wait for a later grace */ |
69 | /* period. This pointer points to the */ |
70 | /* ->next pointer of the last task that */ |
71 | /* must wait for a later grace period, or */ |
72 | /* to &->rcb.rcucblist if there is no */ |
73 | /* such task. */ |
74 | struct list_head blkd_tasks; |
75 | /* Tasks blocked in RCU read-side critical */ |
76 | /* section. Tasks are placed at the head */ |
77 | /* of this list and age towards the tail. */ |
78 | struct list_head *gp_tasks; |
79 | /* Pointer to the first task blocking the */ |
80 | /* current grace period, or NULL if there */ |
81 | /* is no such task. */ |
82 | struct list_head *exp_tasks; |
83 | /* Pointer to first task blocking the */ |
84 | /* current expedited grace period, or NULL */ |
85 | /* if there is no such task. If there */ |
86 | /* is no current expedited grace period, */ |
87 | /* then there cannot be any such task. */ |
88 | #ifdef CONFIG_RCU_BOOST |
89 | struct list_head *boost_tasks; |
90 | /* Pointer to first task that needs to be */ |
91 | /* priority-boosted, or NULL if no priority */ |
92 | /* boosting is needed. If there is no */ |
93 | /* current or expedited grace period, there */ |
94 | /* can be no such task. */ |
95 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
96 | u8 gpnum; /* Current grace period. */ |
97 | u8 gpcpu; /* Last grace period blocked by the CPU. */ |
98 | u8 completed; /* Last grace period completed. */ |
99 | /* If all three are equal, RCU is idle. */ |
100 | #ifdef CONFIG_RCU_BOOST |
101 | unsigned long boost_time; /* When to start boosting (jiffies) */ |
102 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
103 | #ifdef CONFIG_RCU_TRACE |
104 | unsigned long n_grace_periods; |
105 | #ifdef CONFIG_RCU_BOOST |
106 | unsigned long n_tasks_boosted; |
107 | /* Total number of tasks boosted. */ |
108 | unsigned long n_exp_boosts; |
109 | /* Number of tasks boosted for expedited GP. */ |
110 | unsigned long n_normal_boosts; |
111 | /* Number of tasks boosted for normal GP. */ |
112 | unsigned long n_balk_blkd_tasks; |
113 | /* Refused to boost: no blocked tasks. */ |
114 | unsigned long n_balk_exp_gp_tasks; |
115 | /* Refused to boost: nothing blocking GP. */ |
116 | unsigned long n_balk_boost_tasks; |
117 | /* Refused to boost: already boosting. */ |
118 | unsigned long n_balk_notyet; |
119 | /* Refused to boost: not yet time. */ |
120 | unsigned long n_balk_nos; |
121 | /* Refused to boost: not sure why, though. */ |
122 | /* This can happen due to race conditions. */ |
123 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
124 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
125 | }; |
126 | |
127 | static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { |
128 | .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist, |
129 | .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist, |
130 | .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist, |
131 | .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks), |
132 | RCU_TRACE(.rcb.name = "rcu_preempt") |
133 | }; |
134 | |
135 | static void rcu_read_unlock_special(struct task_struct *t); |
136 | static int rcu_preempted_readers_exp(void); |
137 | static void rcu_report_exp_done(void); |
138 | |
139 | /* |
140 | * Return true if the CPU has not yet responded to the current grace period. |
141 | */ |
142 | static int rcu_cpu_blocking_cur_gp(void) |
143 | { |
144 | return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum; |
145 | } |
146 | |
147 | /* |
148 | * Check for a running RCU reader. Because there is only one CPU, |
149 | * there can be but one running RCU reader at a time. ;-) |
150 | * |
156 | * Returns zero if there are no running readers. Returns a positive |
157 | * number if there is at least one reader within its RCU read-side |
158 | * critical section. Returns a negative number if an outermost reader |
159 | * is in the midst of exiting from its RCU read-side critical section. |
160 | */ |
161 | static int rcu_preempt_running_reader(void) |
162 | { |
163 | return current->rcu_read_lock_nesting; |
164 | } |
165 | |
166 | /* |
167 | * Check for preempted RCU readers blocking any grace period. |
168 | * If the caller needs a reliable answer, it must disable hard irqs. |
169 | */ |
170 | static int rcu_preempt_blocked_readers_any(void) |
171 | { |
172 | return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks); |
173 | } |
174 | |
175 | /* |
176 | * Check for preempted RCU readers blocking the current grace period. |
177 | * If the caller needs a reliable answer, it must disable hard irqs. |
178 | */ |
179 | static int rcu_preempt_blocked_readers_cgp(void) |
180 | { |
181 | return rcu_preempt_ctrlblk.gp_tasks != NULL; |
182 | } |
183 | |
184 | /* |
185 | * Return true if another preemptible-RCU grace period is needed. |
186 | */ |
187 | static int rcu_preempt_needs_another_gp(void) |
188 | { |
189 | return *rcu_preempt_ctrlblk.rcb.curtail != NULL; |
190 | } |
191 | |
192 | /* |
193 | * Return true if a preemptible-RCU grace period is in progress. |
194 | * The caller must disable hardirqs. |
195 | */ |
196 | static int rcu_preempt_gp_in_progress(void) |
197 | { |
198 | return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum; |
199 | } |
200 | |
201 | /* |
202 | * Advance a ->blkd_tasks-list pointer to the next entry, returning |
203 | * NULL instead if we are at the end of the list. |
204 | */ |
205 | static struct list_head *rcu_next_node_entry(struct task_struct *t) |
206 | { |
207 | struct list_head *np; |
208 | |
209 | np = t->rcu_node_entry.next; |
210 | if (np == &rcu_preempt_ctrlblk.blkd_tasks) |
211 | np = NULL; |
212 | return np; |
213 | } |
214 | |
215 | #ifdef CONFIG_RCU_TRACE |
216 | |
217 | #ifdef CONFIG_RCU_BOOST |
218 | static void rcu_initiate_boost_trace(void); |
219 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
220 | |
221 | /* |
222 | * Dump additional statistics for TINY_PREEMPT_RCU. |
223 | */ |
224 | static void show_tiny_preempt_stats(struct seq_file *m) |
225 | { |
226 | seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n", |
227 | rcu_preempt_ctrlblk.rcb.qlen, |
228 | rcu_preempt_ctrlblk.n_grace_periods, |
229 | rcu_preempt_ctrlblk.gpnum, |
230 | rcu_preempt_ctrlblk.gpcpu, |
231 | rcu_preempt_ctrlblk.completed, |
232 | "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)], |
233 | "N."[!rcu_preempt_ctrlblk.gp_tasks], |
234 | "E."[!rcu_preempt_ctrlblk.exp_tasks]); |
235 | #ifdef CONFIG_RCU_BOOST |
236 | seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n", |
237 | " ", |
238 | "B."[!rcu_preempt_ctrlblk.boost_tasks], |
239 | rcu_preempt_ctrlblk.n_tasks_boosted, |
240 | rcu_preempt_ctrlblk.n_exp_boosts, |
241 | rcu_preempt_ctrlblk.n_normal_boosts, |
242 | (int)(jiffies & 0xffff), |
243 | (int)(rcu_preempt_ctrlblk.boost_time & 0xffff)); |
244 | seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n", |
245 | " balk", |
246 | rcu_preempt_ctrlblk.n_balk_blkd_tasks, |
247 | rcu_preempt_ctrlblk.n_balk_exp_gp_tasks, |
248 | rcu_preempt_ctrlblk.n_balk_boost_tasks, |
249 | rcu_preempt_ctrlblk.n_balk_notyet, |
250 | rcu_preempt_ctrlblk.n_balk_nos); |
251 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
252 | } |
253 | |
254 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
255 | |
256 | #ifdef CONFIG_RCU_BOOST |
257 | |
258 | #include "rtmutex_common.h" |
259 | |
260 | #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO |
261 | |
262 | /* Controls for rcu_kthread() kthread. */ |
263 | static struct task_struct *rcu_kthread_task; |
264 | static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); |
265 | static unsigned long have_rcu_kthread_work; |
266 | |
267 | /* |
268 | * Carry out RCU priority boosting on the task indicated by ->boost_tasks, |
269 | * and advance ->boost_tasks to the next task in the ->blkd_tasks list. |
270 | */ |
271 | static int rcu_boost(void) |
272 | { |
273 | unsigned long flags; |
274 | struct rt_mutex mtx; |
275 | struct task_struct *t; |
276 | struct list_head *tb; |
277 | |
278 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && |
279 | rcu_preempt_ctrlblk.exp_tasks == NULL) |
280 | return 0; /* Nothing to boost. */ |
281 | |
282 | raw_local_irq_save(flags); |
283 | |
284 | /* |
285 | * Recheck with irqs disabled: all tasks in need of boosting |
286 | * might exit their RCU read-side critical sections on their own |
287 | * if we are preempted just before disabling irqs. |
288 | */ |
289 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && |
290 | rcu_preempt_ctrlblk.exp_tasks == NULL) { |
291 | raw_local_irq_restore(flags); |
292 | return 0; |
293 | } |
294 | |
295 | /* |
296 | * Preferentially boost tasks blocking expedited grace periods. |
297 | * This cannot starve the normal grace periods because a second |
298 | * expedited grace period must boost all blocked tasks, including |
299 | * those blocking the pre-existing normal grace period. |
300 | */ |
301 | if (rcu_preempt_ctrlblk.exp_tasks != NULL) { |
302 | tb = rcu_preempt_ctrlblk.exp_tasks; |
303 | RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++); |
304 | } else { |
305 | tb = rcu_preempt_ctrlblk.boost_tasks; |
306 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++); |
307 | } |
308 | RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++); |
309 | |
310 | /* |
311 | * We boost task t by manufacturing an rt_mutex that appears to |
312 | * be held by task t. We leave a pointer to that rt_mutex where |
313 | * task t can find it, and task t will release the mutex when it |
314 | * exits its outermost RCU read-side critical section. Then |
315 | * simply acquiring this artificial rt_mutex will boost task |
316 | * t's priority. (Thanks to tglx for suggesting this approach!) |
317 | */ |
318 | t = container_of(tb, struct task_struct, rcu_node_entry); |
319 | rt_mutex_init_proxy_locked(&mtx, t); |
320 | t->rcu_boost_mutex = &mtx; |
321 | raw_local_irq_restore(flags); |
322 | rt_mutex_lock(&mtx); |
323 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ |
324 | |
325 | return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL || |
326 | ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL; |
327 | } |
328 | |
329 | /* |
330 | * Check to see if it is now time to start boosting RCU readers blocking |
331 | * the current grace period, and, if so, tell the rcu_kthread_task to |
332 | * start boosting them. If there is an expedited boost in progress, |
333 | * we wait for it to complete. |
334 | * |
335 | * If there are no blocked readers blocking the current grace period, |
336 | * return 0 to let the caller know, otherwise return 1. Note that this |
337 | * return value is independent of whether or not boosting was done. |
338 | */ |
339 | static int rcu_initiate_boost(void) |
340 | { |
341 | if (!rcu_preempt_blocked_readers_cgp() && |
342 | rcu_preempt_ctrlblk.exp_tasks == NULL) { |
343 | RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++); |
344 | return 0; |
345 | } |
346 | if (rcu_preempt_ctrlblk.exp_tasks != NULL || |
347 | (rcu_preempt_ctrlblk.gp_tasks != NULL && |
348 | rcu_preempt_ctrlblk.boost_tasks == NULL && |
349 | ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) { |
350 | if (rcu_preempt_ctrlblk.exp_tasks == NULL) |
351 | rcu_preempt_ctrlblk.boost_tasks = |
352 | rcu_preempt_ctrlblk.gp_tasks; |
353 | invoke_rcu_callbacks(); |
354 | } else |
355 | RCU_TRACE(rcu_initiate_boost_trace()); |
356 | return 1; |
357 | } |
358 | |
359 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) |
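
/*
 * Worked example (illustrative, not part of the original file):
 * CONFIG_RCU_BOOST_DELAY is expressed in milliseconds, so the macro
 * above converts it to jiffies as ms * HZ / 1000, rounded up.  With
 * a delay of 500 ms (the usual Kconfig default) and HZ=100, boosting
 * is considered DIV_ROUND_UP(500 * 100, 1000) = 50 jiffies after the
 * grace period starts.
 */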
360 | |
361 | /* |
362 | * Do priority-boost accounting for the start of a new grace period. |
363 | */ |
364 | static void rcu_preempt_boost_start_gp(void) |
365 | { |
366 | rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; |
367 | } |
368 | |
369 | #else /* #ifdef CONFIG_RCU_BOOST */ |
370 | |
371 | /* |
372 | * If there is no RCU priority boosting, we don't initiate boosting, |
373 | * but we do indicate whether there are blocked readers blocking the |
374 | * current grace period. |
375 | */ |
376 | static int rcu_initiate_boost(void) |
377 | { |
378 | return rcu_preempt_blocked_readers_cgp(); |
379 | } |
380 | |
381 | /* |
382 | * If there is no RCU priority boosting, nothing to do at grace-period start. |
383 | */ |
384 | static void rcu_preempt_boost_start_gp(void) |
385 | { |
386 | } |
387 | |
388 | #endif /* else #ifdef CONFIG_RCU_BOOST */ |
389 | |
390 | /* |
391 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
392 | * that this just means that the task currently running on the CPU is |
393 | * in a quiescent state. There might be any number of tasks blocked |
394 | * while in an RCU read-side critical section. |
395 | * |
396 | * Unlike the other rcu_*_qs() functions, callers to this function |
397 | * must disable irqs in order to protect the assignment to |
398 | * ->rcu_read_unlock_special. |
399 | * |
400 | * Because this is a single-CPU implementation, the only way a grace |
401 | * period can end is if the CPU is in a quiescent state. The reason is |
402 | * that a blocked preemptible-RCU reader can exit its critical section |
403 | * only if the CPU is running it at the time. Therefore, when the |
404 | * last task blocking the current grace period exits its RCU read-side |
405 | * critical section, neither the CPU nor blocked tasks will be stopping |
406 | * the current grace period. (In contrast, SMP implementations |
407 | * might have CPUs running in RCU read-side critical sections that |
408 | * block later grace periods -- but this is not possible given only |
409 | * one CPU.) |
410 | */ |
411 | static void rcu_preempt_cpu_qs(void) |
412 | { |
413 | /* Record both CPU and task as having responded to current GP. */ |
414 | rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum; |
415 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
416 | |
417 | /* If there is no GP then there is nothing more to do. */ |
418 | if (!rcu_preempt_gp_in_progress()) |
419 | return; |
420 | /* |
421 | * Check up on boosting. If there are readers blocking the |
422 | * current grace period, leave. |
423 | */ |
424 | if (rcu_initiate_boost()) |
425 | return; |
426 | |
427 | /* Advance callbacks. */ |
428 | rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum; |
429 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail; |
430 | rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail; |
431 | |
432 | /* If there are no blocked readers, next GP is done instantly. */ |
433 | if (!rcu_preempt_blocked_readers_any()) |
434 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail; |
435 | |
436 | /* If there are done callbacks, cause them to be invoked. */ |
437 | if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) |
438 | invoke_rcu_callbacks(); |
439 | } |
440 | |
441 | /* |
442 | * Start a new RCU grace period if warranted. Hard irqs must be disabled. |
443 | */ |
444 | static void rcu_preempt_start_gp(void) |
445 | { |
446 | if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) { |
447 | |
448 | /* Official start of GP. */ |
449 | rcu_preempt_ctrlblk.gpnum++; |
450 | RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++); |
451 | |
452 | /* Any blocked RCU readers block new GP. */ |
453 | if (rcu_preempt_blocked_readers_any()) |
454 | rcu_preempt_ctrlblk.gp_tasks = |
455 | rcu_preempt_ctrlblk.blkd_tasks.next; |
456 | |
457 | /* Set up for RCU priority boosting. */ |
458 | rcu_preempt_boost_start_gp(); |
459 | |
460 | /* If there is no running reader, CPU is done with GP. */ |
461 | if (!rcu_preempt_running_reader()) |
462 | rcu_preempt_cpu_qs(); |
463 | } |
464 | } |
465 | |
466 | /* |
467 | * We have entered the scheduler, and the current task might soon be |
468 | * context-switched away from. If this task is in an RCU read-side |
469 | * critical section, we will no longer be able to rely on the CPU to |
470 | * record that fact, so we enqueue the task on the blkd_tasks list. |
471 | * If the task started after the current grace period began, as recorded |
472 | * by ->gpcpu, we enqueue at the beginning of the list. Otherwise |
473 | * we enqueue it before the element referenced by ->gp_tasks (or at the tail if |
474 | * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element. |
475 | * The task will dequeue itself when it exits the outermost enclosing |
476 | * RCU read-side critical section. Therefore, the current grace period |
477 | * cannot be permitted to complete until the ->gp_tasks pointer becomes |
478 | * NULL. |
479 | * |
480 | * Caller must disable preemption. |
481 | */ |
482 | void rcu_preempt_note_context_switch(void) |
483 | { |
484 | struct task_struct *t = current; |
485 | unsigned long flags; |
486 | |
487 | local_irq_save(flags); /* must exclude scheduler_tick(). */ |
488 | if (rcu_preempt_running_reader() > 0 && |
489 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { |
490 | |
491 | /* Possibly blocking in an RCU read-side critical section. */ |
492 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; |
493 | |
494 | /* |
495 | * If this CPU has already checked in, then this task |
496 | * will hold up the next grace period rather than the |
497 | * current grace period. Queue the task accordingly. |
498 | * If the task is queued for the current grace period |
499 | * (i.e., this CPU has not yet passed through a quiescent |
500 | * state for the current grace period), then as long |
501 | * as that task remains queued, the current grace period |
502 | * cannot end. |
503 | */ |
504 | list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); |
505 | if (rcu_cpu_blocking_cur_gp()) |
506 | rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; |
507 | } else if (rcu_preempt_running_reader() < 0 && |
508 | t->rcu_read_unlock_special) { |
509 | /* |
510 | * Complete exit from RCU read-side critical section on |
511 | * behalf of preempted instance of __rcu_read_unlock(). |
512 | */ |
513 | rcu_read_unlock_special(t); |
514 | } |
515 | |
516 | /* |
517 | * Either we were not in an RCU read-side critical section to |
518 | * begin with, or we have now recorded that critical section |
519 | * globally. Either way, we can now note a quiescent state |
520 | * for this CPU. Again, if we were in an RCU read-side critical |
521 | * section, and if that critical section was blocking the current |
522 | * grace period, then the fact that the task has been enqueued |
523 | * means that the current grace period continues to be blocked. |
524 | */ |
525 | rcu_preempt_cpu_qs(); |
526 | local_irq_restore(flags); |
527 | } |
528 | |
529 | /* |
530 | * Tiny-preemptible RCU implementation for rcu_read_lock(). |
531 | * Just increment ->rcu_read_lock_nesting; shared state will be updated |
532 | * if we block. |
533 | */ |
534 | void __rcu_read_lock(void) |
535 | { |
536 | current->rcu_read_lock_nesting++; |
537 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */ |
538 | } |
539 | EXPORT_SYMBOL_GPL(__rcu_read_lock); |
540 | |
541 | /* |
542 | * Handle special cases during rcu_read_unlock(), such as needing to |
543 | * notify RCU core processing or a task having blocked during the RCU |
544 | * read-side critical section. |
545 | */ |
546 | static noinline void rcu_read_unlock_special(struct task_struct *t) |
547 | { |
548 | int empty; |
549 | int empty_exp; |
550 | unsigned long flags; |
551 | struct list_head *np; |
552 | #ifdef CONFIG_RCU_BOOST |
553 | struct rt_mutex *rbmp = NULL; |
554 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
555 | int special; |
556 | |
557 | /* |
558 | * NMI handlers cannot block and cannot safely manipulate state. |
559 | * They therefore cannot possibly be special, so just leave. |
560 | */ |
561 | if (in_nmi()) |
562 | return; |
563 | |
564 | local_irq_save(flags); |
565 | |
566 | /* |
567 | * If RCU core is waiting for this CPU to exit critical section, |
568 | * let it know that we have done so. |
569 | */ |
570 | special = t->rcu_read_unlock_special; |
571 | if (special & RCU_READ_UNLOCK_NEED_QS) |
572 | rcu_preempt_cpu_qs(); |
573 | |
574 | /* Hardware IRQ handlers cannot block. */ |
575 | if (in_irq() || in_serving_softirq()) { |
576 | local_irq_restore(flags); |
577 | return; |
578 | } |
579 | |
580 | /* Clean up if blocked during RCU read-side critical section. */ |
581 | if (special & RCU_READ_UNLOCK_BLOCKED) { |
582 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; |
583 | |
584 | /* |
585 | * Remove this task from the ->blkd_tasks list and adjust |
586 | * any pointers that might have been referencing it. |
587 | */ |
588 | empty = !rcu_preempt_blocked_readers_cgp(); |
589 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; |
590 | np = rcu_next_node_entry(t); |
591 | list_del_init(&t->rcu_node_entry); |
592 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) |
593 | rcu_preempt_ctrlblk.gp_tasks = np; |
594 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) |
595 | rcu_preempt_ctrlblk.exp_tasks = np; |
596 | #ifdef CONFIG_RCU_BOOST |
597 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks) |
598 | rcu_preempt_ctrlblk.boost_tasks = np; |
599 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
600 | |
601 | /* |
602 | * If this was the last task on the current list, and if |
603 | * we aren't waiting on the CPU, report the quiescent state |
604 | * and start a new grace period if needed. |
605 | */ |
606 | if (!empty && !rcu_preempt_blocked_readers_cgp()) { |
607 | rcu_preempt_cpu_qs(); |
608 | rcu_preempt_start_gp(); |
609 | } |
610 | |
611 | /* |
612 | * If this was the last task on the expedited lists, |
613 | * then we need to wake up the waiting task. |
614 | */ |
615 | if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL) |
616 | rcu_report_exp_done(); |
617 | } |
618 | #ifdef CONFIG_RCU_BOOST |
619 | /* Unboost self if we were boosted. */ |
620 | if (t->rcu_boost_mutex != NULL) { |
621 | rbmp = t->rcu_boost_mutex; |
622 | t->rcu_boost_mutex = NULL; |
623 | rt_mutex_unlock(rbmp); |
624 | } |
625 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
626 | local_irq_restore(flags); |
627 | } |
628 | |
629 | /* |
630 | * Tiny-preemptible RCU implementation for rcu_read_unlock(). |
631 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost |
632 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then |
633 | * invoke rcu_read_unlock_special() to clean up after a context switch |
634 | * in an RCU read-side critical section and other special cases. |
635 | */ |
636 | void __rcu_read_unlock(void) |
637 | { |
638 | struct task_struct *t = current; |
639 | |
640 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ |
641 | if (t->rcu_read_lock_nesting != 1) |
642 | --t->rcu_read_lock_nesting; |
643 | else { |
644 | t->rcu_read_lock_nesting = INT_MIN; |
645 | barrier(); /* assign before ->rcu_read_unlock_special load */ |
646 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) |
647 | rcu_read_unlock_special(t); |
648 | barrier(); /* ->rcu_read_unlock_special load before assign */ |
649 | t->rcu_read_lock_nesting = 0; |
650 | } |
651 | #ifdef CONFIG_PROVE_LOCKING |
652 | { |
653 | int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); |
654 | |
655 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); |
656 | } |
657 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
658 | } |
659 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); |
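
/*
 * Illustrative sketch (not part of the original file): the reader-side
 * pattern that __rcu_read_lock()/__rcu_read_unlock() ultimately serve,
 * via the rcu_read_lock()/rcu_read_unlock() wrappers.  The struct foo
 * type and gbl_foo pointer are hypothetical.
 *
 *	struct foo {
 *		int a;
 *	};
 *	static struct foo __rcu *gbl_foo;
 *
 *	static int foo_get_a(void)
 *	{
 *		struct foo *fp;
 *		int ret;
 *
 *		rcu_read_lock();
 *		fp = rcu_dereference(gbl_foo);
 *		ret = fp ? fp->a : -1;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 *
 * Under TINY_PREEMPT_RCU the reader may be preempted between the lock
 * and the unlock, in which case the task lands on ->blkd_tasks as
 * described in rcu_preempt_note_context_switch() above.
 */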
660 | |
661 | /* |
662 | * Check for a quiescent state from the current CPU. When a task blocks, |
663 | * the task is recorded in the rcu_preempt_ctrlblk structure, which is |
664 | * checked elsewhere. This is called from the scheduling-clock interrupt. |
665 | * |
666 | * Caller must disable hard irqs. |
667 | */ |
668 | static void rcu_preempt_check_callbacks(void) |
669 | { |
670 | struct task_struct *t = current; |
671 | |
672 | if (rcu_preempt_gp_in_progress() && |
673 | (!rcu_preempt_running_reader() || |
674 | !rcu_cpu_blocking_cur_gp())) |
675 | rcu_preempt_cpu_qs(); |
676 | if (&rcu_preempt_ctrlblk.rcb.rcucblist != |
677 | rcu_preempt_ctrlblk.rcb.donetail) |
678 | invoke_rcu_callbacks(); |
679 | if (rcu_preempt_gp_in_progress() && |
680 | rcu_cpu_blocking_cur_gp() && |
681 | rcu_preempt_running_reader() > 0) |
682 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; |
683 | } |
684 | |
685 | /* |
686 | * TINY_PREEMPT_RCU has an extra callback-list tail pointer to |
687 | * update, so this is invoked from rcu_process_callbacks() to |
688 | * handle that case. Of course, it is invoked for all flavors of |
689 | * RCU, but RCU callbacks can appear only on one of the lists, and |
690 | * neither ->nexttail nor ->donetail can possibly be NULL, so there |
691 | * is no need for an explicit check. |
692 | */ |
693 | static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) |
694 | { |
695 | if (rcu_preempt_ctrlblk.nexttail == rcp->donetail) |
696 | rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist; |
697 | } |
698 | |
699 | /* |
700 | * Process callbacks for preemptible RCU. |
701 | */ |
702 | static void rcu_preempt_process_callbacks(void) |
703 | { |
704 | __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); |
705 | } |
706 | |
707 | /* |
708 | * Queue a preemptible-RCU callback for invocation after a grace period. |
709 | */ |
710 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
711 | { |
712 | unsigned long flags; |
713 | |
714 | debug_rcu_head_queue(head); |
715 | head->func = func; |
716 | head->next = NULL; |
717 | |
718 | local_irq_save(flags); |
719 | *rcu_preempt_ctrlblk.nexttail = head; |
720 | rcu_preempt_ctrlblk.nexttail = &head->next; |
721 | RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++); |
722 | rcu_preempt_start_gp(); /* checks to see if GP needed. */ |
723 | local_irq_restore(flags); |
724 | } |
725 | EXPORT_SYMBOL_GPL(call_rcu); |
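
/*
 * Illustrative sketch (not part of the original file): asynchronous
 * reclamation with call_rcu(), in the style of the examples in
 * Documentation/RCU/whatisRCU.txt.  The struct foo type, gbl_foo
 * pointer, foo_lock, and helper names are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	static struct foo __rcu *gbl_foo;
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	static void foo_update_a(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(gbl_foo,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		if (old_fp)
 *			call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 */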
726 | |
727 | /* |
728 | * synchronize_rcu - wait until a grace period has elapsed. |
729 | * |
730 | * Control will return to the caller some time after a full grace |
731 | * period has elapsed, in other words after all currently executing RCU |
732 | * read-side critical sections have completed. RCU read-side critical |
733 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
734 | * and may be nested. |
735 | */ |
736 | void synchronize_rcu(void) |
737 | { |
738 | rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && |
739 | !lock_is_held(&rcu_lock_map) && |
740 | !lock_is_held(&rcu_sched_lock_map), |
741 | "Illegal synchronize_rcu() in RCU read-side critical section"); |
742 | |
743 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
744 | if (!rcu_scheduler_active) |
745 | return; |
746 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
747 | |
748 | WARN_ON_ONCE(rcu_preempt_running_reader()); |
749 | if (!rcu_preempt_blocked_readers_any()) |
750 | return; |
751 | |
752 | /* Once we get past the fastpath checks, same code as rcu_barrier(). */ |
753 | rcu_barrier(); |
754 | } |
755 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
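
/*
 * Illustrative sketch (not part of the original file): the synchronous
 * counterpart of the call_rcu() example above, reusing the hypothetical
 * gbl_foo pointer and foo_lock.  The updater blocks until all readers
 * that might still see the old version have finished.
 *
 *	static void foo_update_a_sync(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(gbl_foo,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 */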
756 | |
757 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); |
758 | static unsigned long sync_rcu_preempt_exp_count; |
759 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); |
760 | |
761 | /* |
762 | * Return non-zero if there are any tasks in RCU read-side critical |
763 | * sections blocking the current preemptible-RCU expedited grace period. |
764 | * If there is no preemptible-RCU expedited grace period currently in |
765 | * progress, returns zero unconditionally. |
766 | */ |
767 | static int rcu_preempted_readers_exp(void) |
768 | { |
769 | return rcu_preempt_ctrlblk.exp_tasks != NULL; |
770 | } |
771 | |
772 | /* |
773 | * Report the exit from RCU read-side critical section for the last task |
774 | * that queued itself during or before the current expedited preemptible-RCU |
775 | * grace period. |
776 | */ |
777 | static void rcu_report_exp_done(void) |
778 | { |
779 | wake_up(&sync_rcu_preempt_exp_wq); |
780 | } |
781 | |
782 | /* |
783 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea |
784 | * is to rely on the fact that there is but one CPU, and that it is |
785 | * illegal for a task to invoke synchronize_rcu_expedited() while in a |
786 | * preemptible-RCU read-side critical section. Therefore, any such |
787 | * critical sections must correspond to blocked tasks, which must therefore |
788 | * be on the ->blkd_tasks list. So just record the current head of the |
789 | * list in the ->exp_tasks pointer, and wait for all tasks including and |
790 | * after the task pointed to by ->exp_tasks to drain. |
791 | */ |
792 | void synchronize_rcu_expedited(void) |
793 | { |
794 | unsigned long flags; |
795 | struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk; |
796 | unsigned long snap; |
797 | |
798 | barrier(); /* ensure prior action seen before grace period. */ |
799 | |
800 | WARN_ON_ONCE(rcu_preempt_running_reader()); |
801 | |
802 | /* |
803 | * Acquire lock so that there is only one preemptible RCU grace |
804 | * period in flight. Of course, if someone does the expedited |
805 | * grace period for us while we are acquiring the lock, just leave. |
806 | */ |
807 | snap = sync_rcu_preempt_exp_count + 1; |
808 | mutex_lock(&sync_rcu_preempt_exp_mutex); |
809 | if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count)) |
810 | goto unlock_mb_ret; /* Others did our work for us. */ |
811 | |
812 | local_irq_save(flags); |
813 | |
814 | /* |
815 | * All RCU readers have to already be on blkd_tasks because |
816 | * we cannot legally be executing in an RCU read-side critical |
817 | * section. |
818 | */ |
819 | |
820 | /* Snapshot current head of ->blkd_tasks list. */ |
821 | rpcp->exp_tasks = rpcp->blkd_tasks.next; |
822 | if (rpcp->exp_tasks == &rpcp->blkd_tasks) |
823 | rpcp->exp_tasks = NULL; |
824 | |
825 | /* Wait for tail of ->blkd_tasks list to drain. */ |
826 | if (!rcu_preempted_readers_exp()) |
827 | local_irq_restore(flags); |
828 | else { |
829 | rcu_initiate_boost(); |
830 | local_irq_restore(flags); |
831 | wait_event(sync_rcu_preempt_exp_wq, |
832 | !rcu_preempted_readers_exp()); |
833 | } |
834 | |
835 | /* Clean up and exit. */ |
836 | barrier(); /* ensure expedited GP seen before counter increment. */ |
837 | sync_rcu_preempt_exp_count++; |
838 | unlock_mb_ret: |
839 | mutex_unlock(&sync_rcu_preempt_exp_mutex); |
840 | barrier(); /* ensure subsequent action seen after grace period. */ |
841 | } |
842 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
843 | |
844 | /* |
845 | * Does preemptible RCU need the CPU to stay out of dynticks mode? |
846 | */ |
847 | int rcu_preempt_needs_cpu(void) |
848 | { |
849 | if (!rcu_preempt_running_reader()) |
850 | rcu_preempt_cpu_qs(); |
851 | return rcu_preempt_ctrlblk.rcb.rcucblist != NULL; |
852 | } |
853 | |
854 | /* |
855 | * Check for a task exiting while in a preemptible-RCU read-side |
856 | * critical section, clean up if so. No need to issue warnings, |
857 | * as debug_check_no_locks_held() already does this if lockdep |
858 | * is enabled. |
859 | */ |
860 | void exit_rcu(void) |
861 | { |
862 | struct task_struct *t = current; |
863 | |
864 | if (t->rcu_read_lock_nesting == 0) |
865 | return; |
866 | t->rcu_read_lock_nesting = 1; |
867 | __rcu_read_unlock(); |
868 | } |
869 | |
870 | #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ |
871 | |
872 | #ifdef CONFIG_RCU_TRACE |
873 | |
874 | /* |
875 | * Because preemptible RCU does not exist, it is not necessary to |
876 | * dump out its statistics. |
877 | */ |
878 | static void show_tiny_preempt_stats(struct seq_file *m) |
879 | { |
880 | } |
881 | |
882 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
883 | |
884 | /* |
885 | * Because preemptible RCU does not exist, it never has any callbacks |
886 | * to check. |
887 | */ |
888 | static void rcu_preempt_check_callbacks(void) |
889 | { |
890 | } |
891 | |
892 | /* |
893 | * Because preemptible RCU does not exist, it never has any callbacks |
894 | * to remove. |
895 | */ |
896 | static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) |
897 | { |
898 | } |
899 | |
900 | /* |
901 | * Because preemptible RCU does not exist, it never has any callbacks |
902 | * to process. |
903 | */ |
904 | static void rcu_preempt_process_callbacks(void) |
905 | { |
906 | } |
907 | |
908 | #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ |
909 | |
910 | #ifdef CONFIG_RCU_BOOST |
911 | |
912 | /* |
913 | * Wake up rcu_kthread() to process callbacks now eligible for invocation |
914 | * or to boost readers. |
915 | */ |
916 | static void invoke_rcu_callbacks(void) |
917 | { |
918 | have_rcu_kthread_work = 1; |
919 | if (rcu_kthread_task != NULL) |
920 | wake_up(&rcu_kthread_wq); |
921 | } |
922 | |
923 | #ifdef CONFIG_RCU_TRACE |
924 | |
925 | /* |
926 | * Is the current CPU running the RCU-callbacks kthread? |
927 | * Caller must have preemption disabled. |
928 | */ |
929 | static bool rcu_is_callbacks_kthread(void) |
930 | { |
931 | return rcu_kthread_task == current; |
932 | } |
933 | |
934 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
935 | |
936 | /* |
937 | * This kthread invokes RCU callbacks whose grace periods have |
938 | * elapsed. It is awakened as needed, and takes the place of the |
939 | * RCU_SOFTIRQ that is used for this purpose when boosting is disabled. |
940 | * This is a kthread, but it is never stopped, at least not until |
941 | * the system goes down. |
942 | */ |
943 | static int rcu_kthread(void *arg) |
944 | { |
945 | unsigned long work; |
946 | unsigned long morework; |
947 | unsigned long flags; |
948 | |
949 | for (;;) { |
950 | wait_event_interruptible(rcu_kthread_wq, |
951 | have_rcu_kthread_work != 0); |
952 | morework = rcu_boost(); |
953 | local_irq_save(flags); |
954 | work = have_rcu_kthread_work; |
955 | have_rcu_kthread_work = morework; |
956 | local_irq_restore(flags); |
957 | if (work) |
958 | rcu_process_callbacks(NULL); |
959 | schedule_timeout_interruptible(1); /* Leave CPU for others. */ |
960 | } |
961 | |
962 | return 0; /* Not reached, but needed to shut gcc up. */ |
963 | } |
964 | |
965 | /* |
966 | * Spawn the kthread that invokes RCU callbacks. |
967 | */ |
968 | static int __init rcu_spawn_kthreads(void) |
969 | { |
970 | struct sched_param sp; |
971 | |
972 | rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread"); |
973 | sp.sched_priority = RCU_BOOST_PRIO; |
974 | sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp); |
975 | return 0; |
976 | } |
977 | early_initcall(rcu_spawn_kthreads); |
978 | |
979 | #else /* #ifdef CONFIG_RCU_BOOST */ |
980 | |
981 | /* Hold off callback invocation until early_initcall() time. */ |
982 | static int rcu_scheduler_fully_active __read_mostly; |
983 | |
984 | /* |
985 | * Start up softirq processing of callbacks. |
986 | */ |
987 | void invoke_rcu_callbacks(void) |
988 | { |
989 | if (rcu_scheduler_fully_active) |
990 | raise_softirq(RCU_SOFTIRQ); |
991 | } |
992 | |
993 | #ifdef CONFIG_RCU_TRACE |
994 | |
995 | /* |
996 | * There is no callback kthread, so this thread is never it. |
997 | */ |
998 | static bool rcu_is_callbacks_kthread(void) |
999 | { |
1000 | return false; |
1001 | } |
1002 | |
1003 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
1004 | |
1005 | static int __init rcu_scheduler_really_started(void) |
1006 | { |
1007 | rcu_scheduler_fully_active = 1; |
1008 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1009 | raise_softirq(RCU_SOFTIRQ); /* Invoke any callbacks from early boot. */ |
1010 | return 0; |
1011 | } |
1012 | early_initcall(rcu_scheduler_really_started); |
1013 | |
1014 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ |
1015 | |
1016 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
1017 | #include <linux/kernel_stat.h> |
1018 | |
1019 | /* |
1020 | * During boot, we forgive RCU lockdep issues. After this function is |
1021 | * invoked, we start taking RCU lockdep issues seriously. |
1022 | */ |
1023 | void __init rcu_scheduler_starting(void) |
1024 | { |
1025 | WARN_ON(nr_context_switches() > 0); |
1026 | rcu_scheduler_active = 1; |
1027 | } |
1028 | |
1029 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
1030 | |
1031 | #ifdef CONFIG_RCU_TRACE |
1032 | |
1033 | #ifdef CONFIG_RCU_BOOST |
1034 | |
1035 | static void rcu_initiate_boost_trace(void) |
1036 | { |
1037 | if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) |
1038 | rcu_preempt_ctrlblk.n_balk_blkd_tasks++; |
1039 | else if (rcu_preempt_ctrlblk.gp_tasks == NULL && |
1040 | rcu_preempt_ctrlblk.exp_tasks == NULL) |
1041 | rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++; |
1042 | else if (rcu_preempt_ctrlblk.boost_tasks != NULL) |
1043 | rcu_preempt_ctrlblk.n_balk_boost_tasks++; |
1044 | else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) |
1045 | rcu_preempt_ctrlblk.n_balk_notyet++; |
1046 | else |
1047 | rcu_preempt_ctrlblk.n_balk_nos++; |
1048 | } |
1049 | |
1050 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
1051 | |
1052 | static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n) |
1053 | { |
1054 | unsigned long flags; |
1055 | |
1056 | raw_local_irq_save(flags); |
1057 | rcp->qlen -= n; |
1058 | raw_local_irq_restore(flags); |
1059 | } |
1060 | |
1061 | /* |
1062 | * Dump statistics for TINY_RCU, such as they are. |
1063 | */ |
1064 | static int show_tiny_stats(struct seq_file *m, void *unused) |
1065 | { |
1066 | show_tiny_preempt_stats(m); |
1067 | seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen); |
1068 | seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen); |
1069 | return 0; |
1070 | } |
1071 | |
1072 | static int show_tiny_stats_open(struct inode *inode, struct file *file) |
1073 | { |
1074 | return single_open(file, show_tiny_stats, NULL); |
1075 | } |
1076 | |
1077 | static const struct file_operations show_tiny_stats_fops = { |
1078 | .owner = THIS_MODULE, |
1079 | .open = show_tiny_stats_open, |
1080 | .read = seq_read, |
1081 | .llseek = seq_lseek, |
1082 | .release = single_release, |
1083 | }; |
1084 | |
1085 | static struct dentry *rcudir; |
1086 | |
1087 | static int __init rcutiny_trace_init(void) |
1088 | { |
1089 | struct dentry *retval; |
1090 | |
1091 | rcudir = debugfs_create_dir("rcu", NULL); |
1092 | if (!rcudir) |
1093 | goto free_out; |
1094 | retval = debugfs_create_file("rcudata", 0444, rcudir, |
1095 | NULL, &show_tiny_stats_fops); |
1096 | if (!retval) |
1097 | goto free_out; |
1098 | return 0; |
1099 | free_out: |
1100 | debugfs_remove_recursive(rcudir); |
1101 | return 1; |
1102 | } |
1103 | |
1104 | static void __exit rcutiny_trace_cleanup(void) |
1105 | { |
1106 | debugfs_remove_recursive(rcudir); |
1107 | } |
1108 | |
1109 | module_init(rcutiny_trace_init); |
1110 | module_exit(rcutiny_trace_cleanup); |
1111 | |
1112 | MODULE_AUTHOR("Paul E. McKenney"); |
1113 | MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation"); |
1114 | MODULE_LICENSE("GPL"); |
1115 | |
1116 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
1117 |