/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		Documentation/RCU/ *.txt
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	mutex_init(&sp->mutex);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * srcu_readers_active_idx -- returns approximate number of readers
 * active on the specified rank of per-CPU counters.
 */

static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	int sum;

	sum = 0;
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
	return sum;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int sum;

	sum = srcu_readers_active(sp);
	WARN_ON(sum);  /* Leakage unless caller handles error. */
	if (sum != 0)
		return;
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
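
/*
 * Illustrative sketch, not part of this file: the expected life cycle
 * of an srcu_struct.  The names my_srcu, my_init, and my_exit below are
 * hypothetical placeholders.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 *
 * init_srcu_struct() can fail with -ENOMEM, so its return value must be
 * checked.  cleanup_srcu_struct() must not run until all readers of
 * my_srcu have finished, or the WARN_ON() above will fire.
 */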

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	preempt_disable();
	idx = sp->completed & 0x1;
	barrier();  /* ensure compiler looks -once- at sp->completed. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	preempt_disable();
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
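
/*
 * Illustrative sketch, not part of this file: a typical SRCU read-side
 * critical section.  Callers normally use the srcu_read_lock() and
 * srcu_read_unlock() wrappers from <linux/srcu.h>, which invoke the
 * __-prefixed functions above.  The names my_srcu, my_data, and
 * do_something_with() are hypothetical placeholders.
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = rcu_dereference(my_data);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * Unlike rcu_read_lock(), srcu_read_lock() returns an index that must be
 * passed to the matching srcu_read_unlock(), and the critical section
 * between the two calls is permitted to block.
 */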

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for one jiffy (1/HZ seconds) at a time.  This
 * approach has done well in testing, so there is no need for a config
 * parameter.
 */
#define SYNCHRONIZE_SRCU_READER_DELAY 10

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
	int idx;

	idx = sp->completed;
	mutex_lock(&sp->mutex);

	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock.  We need -two- advances of
	 * the counter, not just one.  If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below).  So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */

	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU.  This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */

	idx = sp->completed & 0x1;
	sp->completed++;

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing.  We initially give readers
	 * an arbitrarily chosen 10 microseconds to get out of their
	 * SRCU read-side critical sections, then loop waiting 1/HZ
	 * seconds per iteration.  The 10-microsecond value has done
	 * very well in testing.
	 */

	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 *	Despite what you might think at first glance, the
	 *	preceding synchronize_sched() -must- be within the
	 *	critical section ended by the following mutex_unlock().
	 *	Otherwise, a task taking the early exit can race
	 *	with a srcu_read_unlock(), which might have executed
	 *	just before the preceding srcu_readers_active() check,
	 *	and whose CPU might have reordered the srcu_read_unlock()
	 *	with the preceding critical section.  In this case, there
	 *	is nothing preventing the synchronize_sched() task that is
	 *	taking the early exit from freeing a data structure that
	 *	is still being referenced (out of order) by the task
	 *	doing the srcu_read_unlock().
	 *
	 *	Alternatively, the comparison with "2" on the early exit
	 *	could be changed to "3", but this increases synchronize_srcu()
	 *	latency for bulk loads.  So the current code is preferred.
	 */

	mutex_unlock(&sp->mutex);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
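
/*
 * Illustrative sketch, not part of this file: a typical SRCU updater,
 * assuming newp points to a fully initialized replacement structure.
 * The names my_srcu, my_data, and my_lock are hypothetical placeholders.
 *
 *	struct foo *old;
 *
 *	spin_lock(&my_lock);
 *	old = my_data;
 *	rcu_assign_pointer(my_data, newp);
 *	spin_unlock(&my_lock);
 *
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 *
 * Once synchronize_srcu() returns, every reader that could have seen
 * the old pointer has left its SRCU read-side critical section, so
 * freeing the old structure is safe.
 */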

/**
 * synchronize_srcu_expedited - like synchronize_srcu, but less patient
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu_expedited()
 * from the corresponding SRCU read-side critical section; doing so
 * will result in deadlock.  However, it is perfectly legal to call
 * synchronize_srcu_expedited() on one srcu_struct from some other
 * srcu_struct's read-side critical section.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched_expedited);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */

long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);