Source at commit 2604e7f9a98c27be50a0c3ff7503b6a5ea8f6cfe ("cpufreq_stats: Support runtime changes to frequency table", by Maarten ter Huurne).
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by
running various independent hardware delay and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before performing a globally visible operation, such as registering
device numbers, an asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
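
/*
 * A minimal usage sketch of the cookie pattern described above
 * (illustration only, not part of this file). The probe_one() callback,
 * struct my_dev, discover_hardware() and bus_register_dev() are
 * hypothetical names assumed for the example:
 *
 *	static void probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		discover_hardware(dev);			(may run out of order)
 *
 *		async_synchronize_cookie(cookie);	(wait for earlier cookies)
 *		bus_register_dev(dev);			(externally visible, in order)
 *	}
 *
 *	cookie = async_schedule(probe_one, dev);
 */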

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK 32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head list;
	struct work_struct work;
	async_cookie_t cookie;
	async_func_ptr *func;
	void *data;
	struct async_domain *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
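/*
 * For illustration: this returns the cookie of the oldest entry still
 * running or pending in @running's domain. E.g. if the running list
 * holds entries with cookies 5 and 7 (in that order), a waiter on
 * cookie 6 keeps sleeping until entry 5 completes; with both lists
 * empty, the "infinity" value next_cookie is returned so all waiters
 * are released.
 */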
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
	struct async_entry *entry;

	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain, typeof(*entry), list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
		       (long long)entry->cookie,
		       entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
		       (long long)entry->cookie,
		       entry->func,
		       (long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_domain() functions
 * to wait within a certain synchronization domain rather than globally.
 * A synchronization domain is specified via the running queue @running to use.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
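
/*
 * A minimal sketch of domain usage (illustration only; my_probe() and
 * the dev pointers are hypothetical names assumed for the example):
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe, dev0, &my_domain);
 *	async_schedule_domain(my_probe, dev1, &my_domain);
 *
 *	async_synchronize_full_domain(&my_domain);
 *	(waits only for the my_probe calls above, not for unrelated
 *	 async work scheduled in other domains)
 */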

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
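
/*
 * Sketch (illustrative, under the assumption that ASYNC_DOMAIN_EXCLUSIVE()
 * from linux/async.h declares a domain that starts out unregistered): such
 * a domain is skipped by async_synchronize_full() and therefore never
 * needs async_unregister_domain(). The do_crypt() function and req
 * pointer are hypothetical:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(crypt_domain);
 *
 *	async_schedule_domain(do_crypt, req, &crypt_domain);
 *	async_synchronize_full_domain(&crypt_domain);
 */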

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
		       task_pid_nr(current),
		       (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);