/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <trace/events/rcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}
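
/*
 * Together these four helpers implement a singly linked, tail-pointer
 * callback queue.  Draining one batch and invoking its callbacks might
 * look like the following minimal sketch, in which "mine" and "done"
 * are hypothetical local batches:
 *
 *	struct rcu_batch mine;
 *	struct rcu_head *head;
 *
 *	rcu_batch_init(&mine);
 *	rcu_batch_move(&mine, &done);
 *	while ((head = rcu_batch_dequeue(&mine)) != NULL)
 *		head->func(head);
 */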

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
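
/*
 * A typical user initializes the srcu_struct once, before any reader
 * or updater can reach it, and checks for the -ENOMEM that a failed
 * per-CPU allocation produces.  A minimal sketch, in which "my_srcu"
 * and my_init() are hypothetical names:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		int ret = init_srcu_struct(&my_srcu);
 *
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 */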

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
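
/*
 * Teardown must therefore wait for all readers and all in-flight
 * call_srcu() callbacks before freeing the structure.  A minimal
 * sketch, in which my_exit() and "my_srcu" are hypothetical:
 *
 *	static void my_exit(void)
 *	{
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */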

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = ACCESS_ONCE(sp->completed) & 0x1;
	preempt_disable();
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
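
/*
 * These functions back the srcu_read_lock() and srcu_read_unlock()
 * wrappers.  A read-side critical section therefore looks like the
 * following minimal sketch, in which "my_srcu", "gp", "struct foo",
 * and do_something_with() are hypothetical:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * Unlike classic RCU readers, the section may block.  The idx returned
 * by srcu_read_lock() selects which counter rank to decrement, so it
 * must be passed back unchanged.
 */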

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete.  Such readers will
 * have used the index specified by "idx".  The caller must ensure
 * that ->completed does not change while this check runs, and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}
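
/*
 * For example, if ->completed is 4, __srcu_read_lock() uses rank
 * 4 & 1 == 0; after srcu_flip() it is 5, so new readers use rank 1
 * while the grace-period machinery drains the pre-existing rank-0
 * readers.
 */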

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		schedule_delayed_work(&sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
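
/*
 * For example, asynchronously freeing an SRCU-protected structure that
 * embeds an rcu_head might look like the following sketch, in which
 * "struct foo", foo_reclaim(), and "my_srcu" are hypothetical:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rh));
 *	}
 *
 * Then, once "p" has been made unreachable by new readers:
 *
 *	call_srcu(&my_srcu, &p->rh, foo_reclaim);
 */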

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starving synchronize_srcu(), first wait for the count of the rank
 * indexed by ((->completed & 1) ^ 1) to drain to zero, then flip
 * ->completed, and finally wait for the count of the other rank to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_expedited
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
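
/*
 * A typical updater therefore replaces the protected pointer, waits
 * for a grace period, and only then frees the old version.  A minimal
 * sketch, in which "gp", "my_lock", and "my_srcu" are hypothetical:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */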

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that it is also illegal to call synchronize_srcu_expedited()
 * from the corresponding SRCU read-side critical section;
 * doing so will result in deadlock.  However, it is perfectly legal
 * to call synchronize_srcu_expedited() on one srcu_struct from some
 * other srcu_struct's read-side critical section, as long as
 * the resulting graph of srcu_structs is acyclic.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 *
 * Because callbacks flow through this implementation's batch pipeline
 * in FIFO order, waiting for one full grace period suffices: by the
 * time the grace period's own callback is invoked, every callback
 * queued before srcu_barrier() was called has already been invoked.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
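
/*
 * One plausible use is detecting grace-period progress by snapshot
 * comparison; a sketch in which "my_srcu" is hypothetical:
 *
 *	long snap = srcu_batches_completed(&my_srcu);
 *	...
 *	if (srcu_batches_completed(&my_srcu) != snap)
 *		pr_debug("at least one batch completed in the interim\n");
 */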

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1
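
/*
 * Callbacks flow through a four-stage pipeline.  call_srcu() enqueues
 * onto ->batch_queue, srcu_collect_new() promotes new arrivals to
 * ->batch_check0, srcu_advance_batches() moves them onward as the
 * reader counts drain, and srcu_invoke_callbacks() finally runs them:
 *
 *    ->batch_queue -> ->batch_check0 -> ->batch_check1 -> ->batch_done
 *     (newly          (waiting for      (waiting for      (ready to
 *      queued)         one idx)          the other idx)    invoke)
 */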

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 already passed their first
	 * zero check and the subsequent counter flip back when they
	 * were enqueued on ->batch_check0 in a previous invocation of
	 * srcu_advance_batches().  (Presumably try_check_zero()
	 * returned false during that invocation, leaving the callbacks
	 * stranded on ->batch_check1.)  They are therefore ready to
	 * invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just passed their first
	 * zero check and the flip above, so move them to ->batch_check1
	 * to be re-checked against the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);